Skip to content

API

FireProx: A schemaless, state-aware proxy library for Google Cloud Firestore.

FireProx provides a simplified, Pythonic interface for working with Firestore during rapid prototyping. It wraps the official google-cloud-firestore client with an intuitive object-oriented API that minimizes boilerplate and aligns with Python's programming paradigms.

Main Components:

Synchronous API:
    FireProx: Main entry point for sync operations
    FireObject: State-aware proxy for Firestore documents
    FireCollection: Interface for working with collections

Asynchronous API:
    AsyncFireProx: Main entry point for async operations
    AsyncFireObject: Async state-aware proxy for documents
    AsyncFireCollection: Async interface for collections

Shared:
    State: Enum representing FireObject lifecycle states

Example Usage (Synchronous):

from google.cloud import firestore
from fire_prox import FireProx

# Initialize
native_client = firestore.Client(project='my-project')
db = FireProx(native_client)

# Create a document
users = db.collection('users')
user = users.new()
user.name = 'Ada Lovelace'
user.year = 1815
user.save()

# Read a document (lazy loading)
user = db.doc('users/alovelace')
print(user.name)  # Automatically fetches data

# Update a document
user.year = 1816
user.save()

# Delete a document
user.delete()

Example Usage (Asynchronous):

from google.cloud import firestore
from fire_prox import AsyncFireProx

# Initialize
native_client = firestore.AsyncClient(project='my-project')
db = AsyncFireProx(native_client)

# Create a document
users = db.collection('users')
user = users.new()
user.name = 'Ada Lovelace'
user.year = 1815
await user.save()

# Read a document (explicit fetch required)
user = db.doc('users/alovelace')
await user.fetch()
print(user.name)

# Update a document
user.year = 1816
await user.save()

# Delete a document
await user.delete()

AsyncFireCollection

Bases: BaseFireCollection

A wrapper around Firestore AsyncCollectionReference for document management.

AsyncFireCollection provides a simplified interface for creating new documents and querying collections asynchronously.

Usage Examples:

# Get a collection
users = db.collection('users')

# Create a new document in DETACHED state
new_user = users.new()
new_user.name = 'Ada Lovelace'
new_user.year = 1815
await new_user.save()

# Create with explicit ID
user = users.new()
user.name = 'Charles Babbage'
await user.save(doc_id='cbabbage')

# Phase 2: Query the collection
query = users.where('year', '>', 1800).limit(10)
async for user in query.get():
    print(user.name)
Source code in src/fire_prox/async_fire_collection.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
class AsyncFireCollection(BaseFireCollection):
    """
    A wrapper around Firestore AsyncCollectionReference for document management.

    AsyncFireCollection provides a simplified interface for creating new documents
    and querying collections asynchronously.

    Usage Examples:
        # Get a collection
        users = db.collection('users')

        # Create a new document in DETACHED state
        new_user = users.new()
        new_user.name = 'Ada Lovelace'
        new_user.year = 1815
        await new_user.save()

        # Create with explicit ID
        user = users.new()
        user.name = 'Charles Babbage'
        await user.save(doc_id='cbabbage')

        # Phase 2: Query the collection
        query = users.where('year', '>', 1800).limit(10)
        async for user in query.get():
            print(user.name)
    """

    # =========================================================================
    # Document Creation
    # =========================================================================

    def _instantiate_object(
        self,
        *,
        doc_ref: Any,
        initial_state: State,
        parent_collection: 'AsyncFireCollection',
        sync_doc_ref: Optional[Any] = None,
        sync_client: Optional[Any] = None,
        **_: Any,
    ) -> AsyncFireObject:
        """Instantiate the asynchronous FireObject wrapper.

        Extra keyword arguments from the base class are accepted and ignored
        via ``**_`` so base-class call sites stay forward-compatible.
        """
        return AsyncFireObject(
            doc_ref=doc_ref,
            sync_doc_ref=sync_doc_ref,
            sync_client=sync_client,
            initial_state=initial_state,
            parent_collection=parent_collection,
        )

    def _get_new_kwargs(self) -> dict[str, Any]:
        """Extra kwargs passed to _instantiate_object when creating via new()."""
        return {'sync_client': self._sync_client}

    def _get_doc_kwargs(self, doc_id: str) -> dict[str, Any]:
        """Extra kwargs passed to _instantiate_object when referencing via doc().

        Builds a parallel synchronous document reference when a sync client is
        available, so the async object can fall back to sync operations.
        """
        sync_doc_ref = None
        if self._sync_client is not None:
            sync_doc_ref = self._sync_client.collection(self.path).document(doc_id)
        return {'sync_doc_ref': sync_doc_ref, 'sync_client': self._sync_client}

    def new(self) -> AsyncFireObject:
        """Create a new AsyncFireObject in DETACHED state."""
        return super().new()

    def doc(self, doc_id: str) -> AsyncFireObject:
        """Get a reference to a specific document in this collection."""
        return super().doc(doc_id)

    # =========================================================================
    # Properties (inherited from BaseFireCollection)
    # =========================================================================

    @property
    def parent(self) -> Optional[AsyncFireObject]:
        """
        Get the parent document if this is a subcollection.

        Phase 2 feature.

        Returns:
            AsyncFireObject representing the parent document if this is a
            subcollection, None if this is a root-level collection.
        """
        raise NotImplementedError("Phase 2 feature - subcollections")

    # =========================================================================
    # Internal Query Helpers
    # =========================================================================

    def _wrap_query(self, native_query: Any, **kwargs: Any) -> 'AsyncFireQuery':
        """Wrap a native async query in an AsyncFireQuery bound to this collection.

        Args:
            native_query: A query object from the native Firestore client.
            **kwargs: Extra keyword arguments forwarded to AsyncFireQuery
                (e.g. ``projection``).

        Returns:
            An AsyncFireQuery with this collection as its parent.
        """
        # Deferred import, mirroring the original per-method imports
        # (presumably avoids a circular dependency with the query module).
        from .async_fire_query import AsyncFireQuery
        return AsyncFireQuery(native_query, parent_collection=self, **kwargs)

    def _as_query(self) -> 'AsyncFireQuery':
        """Expose the bare collection reference as a query (for aggregations)."""
        return self._wrap_query(self._collection_ref)

    # =========================================================================
    # Query Methods (Phase 2)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
        """
        Create a query with a filter condition.

        Phase 2.5 feature. Builds a lightweight query for common filtering needs.

        Args:
            field: The field path to filter on.
            op: Comparison operator.
            value: The value to compare against.

        Returns:
            An AsyncFireQuery instance for method chaining.

        Example:
            query = users.where('birth_year', '>', 1800)
                        .where('country', '==', 'UK')
                        .limit(10)
            async for user in query.stream():
                print(user.name)
        """
        from google.cloud.firestore_v1.base_query import FieldFilter

        # FieldFilter is the modern (non-deprecated) way to express a filter.
        filter_obj = FieldFilter(field, op, value)
        return self._wrap_query(self._collection_ref.where(filter=filter_obj))

    def order_by(
        self,
        field: str,
        direction: str = 'ASCENDING'
    ) -> 'AsyncFireQuery':
        """
        Create a query with ordering.

        Phase 2.5 feature.

        Args:
            field: The field path to order by.
            direction: 'ASCENDING' or 'DESCENDING'.

        Returns:
            An AsyncFireQuery instance for method chaining.

        Raises:
            ValueError: If direction is not 'ASCENDING' or 'DESCENDING'
                (case-insensitive).
        """
        from google.cloud.firestore_v1 import Query as QueryClass

        # Translate the user-facing string into the native direction constant.
        if direction.upper() == 'ASCENDING':
            direction_const = QueryClass.ASCENDING
        elif direction.upper() == 'DESCENDING':
            direction_const = QueryClass.DESCENDING
        else:
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        return self._wrap_query(
            self._collection_ref.order_by(field, direction=direction_const)
        )

    def limit(self, count: int) -> 'AsyncFireQuery':
        """
        Create a query with a result limit.

        Phase 2.5 feature.

        Args:
            count: Maximum number of results to return.

        Returns:
            An AsyncFireQuery instance for method chaining.

        Raises:
            ValueError: If count is not positive.
        """
        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")

        return self._wrap_query(self._collection_ref.limit(count))

    def select(self, *field_paths: str) -> 'AsyncFireQuery':
        """
        Create a query with field projection.

        Phase 4 Part 3 feature. Selects specific fields to return in query results.
        Returns vanilla dictionaries instead of AsyncFireObject instances.

        Args:
            *field_paths: One or more field paths to select.

        Returns:
            An AsyncFireQuery instance with projection applied.

        Raises:
            ValueError: If no field paths are given.

        Example:
            # Select specific fields
            results = await users.select('name', 'email').get()
            # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
        """
        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # The projection kwarg tells AsyncFireQuery to yield plain dicts.
        return self._wrap_query(
            self._collection_ref.select(list(field_paths)),
            projection=field_paths,
        )

    async def get_all(self) -> AsyncIterator[AsyncFireObject]:
        """
        Retrieve all documents in the collection.

        Phase 2.5 feature. Returns an async iterator of all documents.

        Yields:
            AsyncFireObject instances in LOADED state for each document.

        Example:
            async for user in users.get_all():
                print(f"{user.name}: {user.year}")
        """
        # Stream snapshots lazily; each is wrapped without a second fetch.
        async for snapshot in self._collection_ref.stream():
            yield AsyncFireObject.from_snapshot(snapshot, parent_collection=self)

    # =========================================================================
    # Vector Query Methods
    # =========================================================================

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'AsyncFireQuery':
        """
        Find the nearest neighbors based on vector similarity.

        Performs a vector similarity search to find documents with embeddings
        nearest to the query vector. Requires a single-field vector index on
        the vector_field.

        Args:
            vector_field: Name of the field containing vector embeddings.
            query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
            distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
                DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
            limit: Maximum number of nearest neighbors to return (max 1000).
            distance_result_field: Optional field name to store the calculated distance
                in the query results.

        Returns:
            An AsyncFireQuery instance for method chaining and execution.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            collection = db.collection("documents")
            query = collection.find_nearest(
                vector_field="embedding",
                query_vector=Vector([0.1, 0.2, 0.3]),
                distance_measure=DistanceMeasure.EUCLIDEAN,
                limit=5
            )
            async for doc in query.stream():
                print(f"{doc.title}: {doc.embedding}")

        Note:
            - Requires a vector index on the vector_field
            - Maximum limit is 1000 documents
            - Can be combined with where() for pre-filtering (requires composite index)
            - Does not work with Firestore emulator (production only)
        """
        # All arguments are forwarded verbatim to the native find_nearest API.
        return self._wrap_query(
            self._collection_ref.find_nearest(
                vector_field=vector_field,
                query_vector=query_vector,
                distance_measure=distance_measure,
                limit=limit,
                distance_result_field=distance_result_field,
            )
        )

    # =========================================================================
    # Aggregation Methods (Phase 4 Part 5)
    # =========================================================================

    async def count(self) -> int:
        """
        Count documents in the collection.

        Phase 4 Part 5 feature. Returns the total count of documents
        without fetching their data.

        Returns:
            The number of documents in the collection.

        Example:
            total = await users.count()
            print(f"Total users: {total}")
        """
        return await self._as_query().count()

    async def sum(self, field: str):
        """
        Sum a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the sum of a numeric field
        without fetching document data.

        Args:
            field: The field name to sum.

        Returns:
            The sum of the field values (int or float).

        Example:
            total_revenue = await orders.sum('amount')
        """
        return await self._as_query().sum(field)

    async def avg(self, field: str) -> float:
        """
        Average a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the average of a numeric field
        without fetching document data.

        Args:
            field: The field name to average.

        Returns:
            The average of the field values (float).

        Example:
            avg_rating = await products.avg('rating')
        """
        return await self._as_query().avg(field)

    async def aggregate(self, **aggregations):
        """
        Execute multiple aggregations in a single query.

        Phase 4 Part 5 feature. Performs multiple aggregation operations
        (count, sum, avg) in one efficient query.

        Args:
            **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

        Returns:
            Dictionary mapping aggregation names to their results.

        Example:
            from fire_prox import Count, Sum, Avg

            stats = await users.aggregate(
                total=Count(),
                total_score=Sum('score'),
                avg_age=Avg('age')
            )
            # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
        """
        return await self._as_query().aggregate(**aggregations)

    # =========================================================================
    # Collection Deletion
    # =========================================================================

    async def delete_all(
        self,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete every document in this collection asynchronously.

        Firestore does not expose a server-side "drop collection" operation.
        This helper batches document deletes and, when recursive is True
        (default), also clears any nested subcollections before removing
        the parent document.

        Args:
            batch_size: Maximum number of deletes per commit.
            recursive: Whether to delete nested subcollections.
            dry_run: Count affected documents without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections
            visited during recursion.

        Raises:
            ValueError: If batch_size is not positive.
        """
        self._validate_batch_size(batch_size)

        # include_self=False: this collection itself is not counted as a
        # visited subcollection.
        return await self._delete_collection_recursive(
            collection_ref=self._collection_ref,
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
            include_self=False,
        )

    async def _delete_collection_recursive(
        self,
        *,
        collection_ref: Any,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
        include_self: bool,
    ) -> Dict[str, int]:
        """Internal helper to delete documents within an async collection reference.

        Invariant: ``batch is None`` exactly when ``dry_run`` is True, so a
        single None-check suffices to gate all write operations below.
        """
        client = collection_ref._client
        stats = {'documents': 0, 'collections': 1 if include_self else 0}
        batch = None if dry_run else client.batch()
        ops_in_batch = 0

        # list_documents (unlike stream) also surfaces "missing" parent docs
        # that only exist as containers for subcollections.
        async for doc_ref in collection_ref.list_documents(page_size=batch_size):
            if recursive:
                # Clear nested subcollections before deleting the parent doc.
                sub_stats = await self._delete_document_subcollections(
                    doc_ref,
                    batch_size=batch_size,
                    recursive=recursive,
                    dry_run=dry_run,
                )
                stats['documents'] += sub_stats['documents']
                stats['collections'] += sub_stats['collections']

            stats['documents'] += 1

            if batch is not None:
                batch.delete(doc_ref)
                ops_in_batch += 1
                # Commit and start a fresh batch once the cap is reached.
                if ops_in_batch >= batch_size:
                    await batch.commit()
                    batch = client.batch()
                    ops_in_batch = 0

        # Flush any remaining deletes in the final partial batch.
        if batch is not None and ops_in_batch:
            await batch.commit()

        return stats

    async def _delete_document_subcollections(
        self,
        doc_ref: Any,
        *,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
    ) -> Dict[str, int]:
        """Delete all subcollections hanging off an async document reference."""
        stats = {'documents': 0, 'collections': 0}

        async for subcollection_ref in doc_ref.collections():
            # include_self=True: each visited subcollection counts itself.
            sub_stats = await self._delete_collection_recursive(
                collection_ref=subcollection_ref,
                batch_size=batch_size,
                recursive=recursive,
                dry_run=dry_run,
                include_self=True,
            )
            stats['documents'] += sub_stats['documents']
            stats['collections'] += sub_stats['collections']

        return stats

parent property

Get the parent document if this is a subcollection.

Phase 2 feature.

Returns: AsyncFireObject representing the parent document if this is a subcollection, None if this is a root-level collection.

aggregate(**aggregations) async

Execute multiple aggregations in a single query.

Phase 4 Part 5 feature. Performs multiple aggregation operations (count, sum, avg) in one efficient query.

Args: **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

Returns: Dictionary mapping aggregation names to their results.

Example: from fire_prox import Count, Sum, Avg

stats = await users.aggregate(
    total=Count(),
    total_score=Sum('score'),
    avg_age=Avg('age')
)
# Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
Source code in src/fire_prox/async_fire_collection.py
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
async def aggregate(self, **aggregations):
    """
    Execute multiple aggregations in a single query.

    Phase 4 Part 5 feature. Performs multiple aggregation operations
    (count, sum, avg) in one efficient query.

    Args:
        **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

    Returns:
        Dictionary mapping aggregation names to their results.

    Example:
        from fire_prox import Count, Sum, Avg

        stats = await users.aggregate(
            total=Count(),
            total_score=Sum('score'),
            avg_age=Avg('age')
        )
        # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
    """
    # Deferred import (presumably avoids a circular dependency — confirm).
    from .async_fire_query import AsyncFireQuery
    # Use collection reference directly as a query for aggregation
    query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    # Delegates the actual aggregation execution to AsyncFireQuery.aggregate.
    return await query.aggregate(**aggregations)

avg(field) async

Average a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the average of a numeric field without fetching document data.

Args: field: The field name to average.

Returns: The average of the field values (float).

Example: avg_rating = await products.avg('rating')

Source code in src/fire_prox/async_fire_collection.py
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
async def avg(self, field: str) -> float:
    """
    Average a numeric field across all documents.

    Phase 4 Part 5 feature. Calculates the average of a numeric field
    without fetching document data.

    Args:
        field: The field name to average.

    Returns:
        The average of the field values (float).

    Example:
        avg_rating = await products.avg('rating')
    """
    # Deferred import (presumably avoids a circular dependency — confirm).
    from .async_fire_query import AsyncFireQuery
    # Use collection reference directly as a query for aggregation
    query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    # Delegates the actual aggregation execution to AsyncFireQuery.avg.
    return await query.avg(field)

count() async

Count documents in the collection.

Phase 4 Part 5 feature. Returns the total count of documents without fetching their data.

Returns: The number of documents in the collection.

Example:

total = await users.count()
print(f"Total users: {total}")

Source code in src/fire_prox/async_fire_collection.py
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
async def count(self) -> int:
    """
    Count documents in the collection.

    Phase 4 Part 5 feature. Returns the total count of documents
    without fetching their data.

    Returns:
        The number of documents in the collection.

    Example:
        total = await users.count()
        print(f"Total users: {total}")
    """
    # Deferred import (presumably avoids a circular dependency — confirm).
    from .async_fire_query import AsyncFireQuery
    # Use collection reference directly as a query for aggregation
    query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    # Delegates the actual aggregation execution to AsyncFireQuery.count.
    return await query.count()

delete_all(*, batch_size=50, recursive=True, dry_run=False) async

Delete every document in this collection asynchronously.

Firestore does not expose a server-side "drop collection" operation. This helper batches document deletes and, when recursive is True (default), also clears any nested subcollections before removing the parent document.

Args: batch_size: Maximum number of deletes per commit. recursive: Whether to delete nested subcollections. dry_run: Count affected documents without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections visited during recursion.

Raises: ValueError: If batch_size is not positive.

Source code in src/fire_prox/async_fire_collection.py
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
async def delete_all(
    self,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Delete every document in this collection asynchronously.

    Firestore does not expose a server-side "drop collection" operation.
    This helper batches document deletes and, when recursive is True
    (default), also clears any nested subcollections before removing
    the parent document.

    Args:
        batch_size: Maximum number of deletes per commit.
        recursive: Whether to delete nested subcollections.
        dry_run: Count affected documents without executing writes.

    Returns:
        Dictionary with counts for deleted documents and subcollections
        visited during recursion.

    Raises:
        ValueError: If batch_size is not positive.
    """
    self._validate_batch_size(batch_size)

    # include_self=False: the top-level collection itself is not counted
    # among the subcollections visited by the recursive helper.
    return await self._delete_collection_recursive(
        collection_ref=self._collection_ref,
        batch_size=batch_size,
        recursive=recursive,
        dry_run=dry_run,
        include_self=False,
    )

doc(doc_id)

Get a reference to a specific document in this collection.

Source code in src/fire_prox/async_fire_collection.py
82
83
84
def doc(self, doc_id: str) -> AsyncFireObject:
    """Get a reference to a specific document in this collection."""
    # Delegates to the shared BaseFireCollection implementation.
    return super().doc(doc_id)

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search to find documents with embeddings nearest to the query vector. Requires a single-field vector index on the vector_field.

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: An AsyncFireQuery instance for method chaining and execution.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

collection = db.collection("documents")
query = collection.find_nearest(
    vector_field="embedding",
    query_vector=Vector([0.1, 0.2, 0.3]),
    distance_measure=DistanceMeasure.EUCLIDEAN,
    limit=5
)
async for doc in query.stream():
    print(f"{doc.title}: {doc.embedding}")

Note: - Requires a vector index on the vector_field - Maximum limit is 1000 documents - Can be combined with where() for pre-filtering (requires composite index) - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/async_fire_collection.py
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'AsyncFireQuery':
    """
    Find the nearest neighbors based on vector similarity.

    Performs a vector similarity search to find documents with embeddings
    nearest to the query vector. Requires a single-field vector index on
    the vector_field.

    Args:
        vector_field: Name of the field containing vector embeddings.
        query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
        distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
            DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
        limit: Maximum number of nearest neighbors to return (max 1000).
        distance_result_field: Optional field name to store the calculated distance
            in the query results.

    Returns:
        An AsyncFireQuery instance for method chaining and execution.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        collection = db.collection("documents")
        query = collection.find_nearest(
            vector_field="embedding",
            query_vector=Vector([0.1, 0.2, 0.3]),
            distance_measure=DistanceMeasure.EUCLIDEAN,
            limit=5
        )
        async for doc in query.stream():
            print(f"{doc.title}: {doc.embedding}")

    Note:
        - Requires a vector index on the vector_field
        - Maximum limit is 1000 documents
        - Can be combined with where() for pre-filtering (requires composite index)
        - Does not work with Firestore emulator (production only)
    """
    # Deferred import (presumably avoids a circular dependency — confirm).
    from .async_fire_query import AsyncFireQuery

    # Create vector query using native find_nearest; all arguments are
    # forwarded verbatim to the native client API.
    native_query = self._collection_ref.find_nearest(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return AsyncFireQuery(native_query, parent_collection=self)

get_all() async

Retrieve all documents in the collection.

Phase 2.5 feature. Returns an async iterator of all documents.

Yields: AsyncFireObject instances in LOADED state for each document.

Example:

async for user in users.get_all():
    print(f"{user.name}: {user.year}")

Source code in src/fire_prox/async_fire_collection.py
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
async def get_all(self) -> AsyncIterator[AsyncFireObject]:
    """
    Yield every document in this collection as a wrapped object.

    Phase 2.5 feature. Streams the collection through the native async
    client and wraps each snapshot as it arrives, so documents are
    produced one at a time rather than materialized up front.

    Yields:
        AsyncFireObject instances in LOADED state, one per document.

    Example:
        async for user in users.get_all():
            print(f"{user.name}: {user.year}")
    """
    async for doc_snapshot in self._collection_ref.stream():
        yield AsyncFireObject.from_snapshot(doc_snapshot, parent_collection=self)

limit(count)

Create a query with a result limit.

Phase 2.5 feature.

Args: count: Maximum number of results to return.

Returns: An AsyncFireQuery instance for method chaining.

Source code in src/fire_prox/async_fire_collection.py
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
def limit(self, count: int) -> 'AsyncFireQuery':
    """
    Create a query with a result limit.

    Phase 2.5 feature.

    Args:
        count: Maximum number of results to return. Must be positive.

    Returns:
        An AsyncFireQuery instance for method chaining.

    Raises:
        ValueError: If count is not positive.
    """
    # Validate before importing so invalid input fails fast, without
    # paying for (or depending on) the query-module import.
    if count <= 0:
        raise ValueError(f"Limit count must be positive, got {count}")

    # Local import avoids a circular dependency between collection and query.
    from .async_fire_query import AsyncFireQuery

    # Delegate to the native client's limit() and wrap for chaining.
    native_query = self._collection_ref.limit(count)
    return AsyncFireQuery(native_query, parent_collection=self)

new()

Create a new AsyncFireObject in DETACHED state.

Source code in src/fire_prox/async_fire_collection.py
78
79
80
def new(self) -> AsyncFireObject:
    """Create a new AsyncFireObject in DETACHED state."""
    # Thin override of the base implementation; presumably exists to narrow
    # the return annotation to the async object type — TODO confirm against
    # BaseFireCollection.new().
    return super().new()

order_by(field, direction='ASCENDING')

Create a query with ordering.

Phase 2.5 feature.

Args: field: The field path to order by. direction: 'ASCENDING' or 'DESCENDING'.

Returns: An AsyncFireQuery instance for method chaining.

Source code in src/fire_prox/async_fire_collection.py
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
def order_by(
    self,
    field: str,
    direction: str = 'ASCENDING'
) -> 'AsyncFireQuery':
    """
    Create a query with ordering.

    Phase 2.5 feature.

    Args:
        field: The field path to order by.
        direction: 'ASCENDING' or 'DESCENDING' (case-insensitive).

    Returns:
        An AsyncFireQuery instance for method chaining.

    Raises:
        ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.
    """
    # Validate before importing so an invalid direction fails fast,
    # without paying for (or depending on) the heavier imports below.
    normalized = direction.upper()
    if normalized not in ('ASCENDING', 'DESCENDING'):
        raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

    from google.cloud.firestore_v1 import Query as QueryClass

    from .async_fire_query import AsyncFireQuery

    # Map the validated string onto the native constant.
    direction_const = (
        QueryClass.ASCENDING if normalized == 'ASCENDING' else QueryClass.DESCENDING
    )

    # Create query with ordering and wrap for chaining.
    native_query = self._collection_ref.order_by(field, direction=direction_const)
    return AsyncFireQuery(native_query, parent_collection=self)

select(*field_paths)

Create a query with field projection.

Phase 4 Part 3 feature. Selects specific fields to return in query results. Returns vanilla dictionaries instead of AsyncFireObject instances.

Args: *field_paths: One or more field paths to select.

Returns: An AsyncFireQuery instance with projection applied.

Example: # Select specific fields results = await users.select('name', 'email').get() # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]

Source code in src/fire_prox/async_fire_collection.py
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
def select(self, *field_paths: str) -> 'AsyncFireQuery':
    """
    Create a query with field projection.

    Phase 4 Part 3 feature. Selects specific fields to return in query results.
    Returns vanilla dictionaries instead of AsyncFireObject instances.

    Args:
        *field_paths: One or more field paths to select.

    Returns:
        An AsyncFireQuery instance with projection applied.

    Raises:
        ValueError: If no field paths are provided.

    Example:
        # Select specific fields
        results = await users.select('name', 'email').get()
        # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
    """
    # Validate before importing so an empty selection fails fast, without
    # paying for (or depending on) the query-module import.
    if not field_paths:
        raise ValueError("select() requires at least one field path")

    from .async_fire_query import AsyncFireQuery

    # The native select() expects a list of field paths; the projection is
    # also recorded on the wrapper so results come back as plain dicts.
    native_query = self._collection_ref.select(list(field_paths))
    return AsyncFireQuery(native_query, parent_collection=self, projection=field_paths)

sum(field) async

Sum a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the sum of a numeric field without fetching document data.

Args: field: The field name to sum.

Returns: The sum of the field values (int or float).

Example: total_revenue = await orders.sum('amount')

Source code in src/fire_prox/async_fire_collection.py
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
async def sum(self, field: str):
    """
    Compute the sum of a numeric field over every document.

    Phase 4 Part 5 feature. Runs a server-side aggregation, so no
    document payloads are fetched.

    Args:
        field: Name of the numeric field to aggregate.

    Returns:
        The aggregated total (int or float).

    Example:
        total_revenue = await orders.sum('amount')
    """
    from .async_fire_query import AsyncFireQuery

    # A collection reference is itself a valid query target, so wrap it
    # directly and delegate the aggregation to the query layer.
    aggregate_query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    return await aggregate_query.sum(field)

where(field, op, value)

Create a query with a filter condition.

Phase 2.5 feature. Builds a lightweight query for common filtering needs.

Args: field: The field path to filter on. op: Comparison operator. value: The value to compare against.

Returns: An AsyncFireQuery instance for method chaining.

Example: query = users.where('birth_year', '>', 1800) .where('country', '==', 'UK') .limit(10) async for user in query.stream(): print(user.name)

Source code in src/fire_prox/async_fire_collection.py
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
    """
    Start a filtered query against this collection.

    Phase 2.5 feature. Lightweight entry point for common filtering needs;
    further constraints can be chained on the returned query.

    Args:
        field: The field path to filter on.
        op: Comparison operator.
        value: The value to compare against.

    Returns:
        An AsyncFireQuery instance for method chaining.

    Example:
        query = users.where('birth_year', '>', 1800)
                    .where('country', '==', 'UK')
                    .limit(10)
        async for user in query.stream():
            print(user.name)
    """
    from google.cloud.firestore_v1.base_query import FieldFilter

    from .async_fire_query import AsyncFireQuery

    # Build the native filtered query, then wrap it for chaining.
    condition = FieldFilter(field, op, value)
    filtered = self._collection_ref.where(filter=condition)
    return AsyncFireQuery(filtered, parent_collection=self)

AsyncFireObject

Bases: BaseFireObject

Asynchronous schemaless, state-aware proxy for a Firestore document.

AsyncFireObject provides an object-oriented interface to Firestore documents using the async/await pattern for all I/O operations.

Lazy Loading: AsyncFireObject supports lazy loading via automatic fetch on attribute access. When accessing an attribute on an ATTACHED object, it will automatically fetch data from Firestore (via a blocking call on a companion synchronous client). This happens once per object - subsequent accesses are instant dict lookups.

Usage Examples: # Create a new document (DETACHED state) user = collection.new() user.name = 'Ada Lovelace' user.year = 1815 await user.save() # Transitions to LOADED

# Load existing document with lazy loading (automatic fetch)
user = db.doc('users/alovelace')  # ATTACHED state
print(user.name)  # Automatically fetches data, transitions to LOADED

# Or explicitly fetch if preferred
user = db.doc('users/alovelace')
await user.fetch()  # Explicit async fetch
print(user.name)

# Update and save
user.year = 1816
await user.save()

# Delete
await user.delete()
Source code in src/fire_prox/async_fire_object.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
class AsyncFireObject(BaseFireObject):
    """
    Asynchronous schemaless, state-aware proxy for a Firestore document.

    AsyncFireObject provides an object-oriented interface to Firestore documents
    using the async/await pattern for all I/O operations.

    Lazy Loading: AsyncFireObject supports lazy loading via automatic fetch on
    attribute access. When accessing an attribute on an ATTACHED object, it will
    automatically fetch data from Firestore (via a blocking call on a companion
    synchronous client). This happens once per object - subsequent accesses are
    instant dict lookups.

    Usage Examples:
        # Create a new document (DETACHED state)
        user = collection.new()
        user.name = 'Ada Lovelace'
        user.year = 1815
        await user.save()  # Transitions to LOADED

        # Load existing document with lazy loading (automatic fetch)
        user = db.doc('users/alovelace')  # ATTACHED state
        print(user.name)  # Automatically fetches data, transitions to LOADED

        # Or explicitly fetch if preferred
        user = db.doc('users/alovelace')
        await user.fetch()  # Explicit async fetch
        print(user.name)

        # Update and save
        user.year = 1816
        await user.save()

        # Delete
        await user.delete()
    """

    # =========================================================================
    # Firestore I/O Hooks
    # =========================================================================

    async def _get_snapshot(self, transaction: Optional[Any] = None) -> DocumentSnapshot:
        """Retrieve a document snapshot using the async client.

        Args:
            transaction: Optional transaction to read within, for
                transactionally-consistent reads.
        """
        if transaction is not None:
            return await self._doc_ref.get(transaction=transaction)
        return await self._doc_ref.get()

    def _create_document(self, doc_id: Optional[str] = None) -> AsyncDocumentReference:
        """Create a new async document reference for DETACHED saves.

        Args:
            doc_id: Optional explicit document ID; when omitted the native
                client generates one.

        Raises:
            ValueError: If the object has no parent collection to create under.
        """
        if not self._parent_collection:
            raise ValueError("DETACHED object has no parent collection")

        collection_ref = self._parent_collection._collection_ref
        if doc_id:
            doc_ref = collection_ref.document(doc_id)
        else:
            doc_ref = collection_ref.document()

        # object.__setattr__ bypasses this class's attribute interception so
        # internal bookkeeping fields are not treated as document data.
        object.__setattr__(self, '_doc_ref', doc_ref)

        if self._sync_client is not None:
            # Keep a companion sync reference at the same path; __getattr__
            # uses it for lazy loading.
            sync_ref = self._sync_client.document(doc_ref.path)
            object.__setattr__(self, '_sync_doc_ref', sync_ref)

        return doc_ref

    async def _write_set(
        self,
        data: Dict[str, Any],
        doc_ref: Optional[AsyncDocumentReference] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Persist data via a set call on the async client.

        When a transaction or batch is supplied, the write is only queued on
        it (no await); otherwise the set is executed immediately.
        """
        target_ref = doc_ref or self._doc_ref

        if transaction is not None:
            transaction.set(target_ref, data)
        elif batch is not None:
            batch.set(target_ref, data)
        else:
            await target_ref.set(data)

    async def _write_update(
        self,
        update_dict: Dict[str, Any],
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Perform an update operation using the async client.

        As with _write_set, transaction/batch writes are queued rather than
        awaited; only the direct path performs I/O here.
        """
        if transaction is not None:
            transaction.update(self._doc_ref, update_dict)
        elif batch is not None:
            batch.update(self._doc_ref, update_dict)
        else:
            await self._doc_ref.update(update_dict)

    async def _write_delete(self, batch: Optional[Any] = None) -> None:
        """Delete the document using the async client.

        A batch delete is queued on the batch; otherwise deletion is awaited.
        """
        if batch is not None:
            batch.delete(self._doc_ref)
        else:
            await self._doc_ref.delete()

    def __getattr__(self, name: str) -> Any:
        """
        Handle attribute access for document fields with lazy loading.

        This method implements lazy loading: if the object is in ATTACHED state,
        accessing any data attribute will automatically trigger a synchronous fetch
        to load the data from Firestore using a companion sync client.

        This fetch happens **once per object** - after the first attribute access,
        the object transitions to LOADED state and subsequent accesses are instant
        dict lookups.

        Args:
            name: The attribute name being accessed.

        Returns:
            The value of the field from the internal _data cache.

        Raises:
            AttributeError: If the attribute doesn't exist in _data after
                           fetching (if necessary).
            NotFound: If document doesn't exist in Firestore (during lazy load).

        State Transitions:
            ATTACHED -> LOADED: Automatically fetches data on first access.

        Example:
            user = db.doc('users/alovelace')  # ATTACHED
            name = user.name  # Triggers sync fetch, transitions to LOADED
            year = user.year  # No fetch needed, already LOADED
        """
        if name in self._INTERNAL_ATTRS:
            raise AttributeError(f"Internal attribute {name} not set")

        # If we're in ATTACHED state, trigger lazy loading via sync fetch
        # NOTE(review): reading self._sync_doc_ref here re-enters __getattr__
        # if the attribute was never set, raising AttributeError instead of
        # evaluating falsy — presumably __init__ always sets it; confirm.
        if self._state == State.ATTACHED and self._sync_doc_ref:
            # Use sync doc ref for lazy loading (synchronous fetch)
            snapshot = self._sync_doc_ref.get()

            if not snapshot.exists:
                raise NotFound(f"Document {self._sync_doc_ref.path} does not exist")

            # Get data and convert special types (DocumentReference → FireObject, etc.)
            data = snapshot.to_dict() or {}
            converted_data = {}
            sync_client = (
                self._sync_doc_ref._client
                if hasattr(self, '_sync_doc_ref') and self._sync_doc_ref
                else None
            )
            for key, value in data.items():
                converted_data[key] = self._convert_snapshot_value_for_retrieval(
                    value,
                    is_async=True,
                    sync_client=sync_client,
                )

            # Transition to LOADED with converted data
            self._transition_to_loaded(converted_data)

        return self._materialize_field(name)

    # =========================================================================
    # Async Lifecycle Methods
    # =========================================================================

    async def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'AsyncFireObject':
        """
        Fetch document data from Firestore asynchronously.

        Args:
            force: If True, fetch data even if already LOADED.
            transaction: Optional transaction object for transactional reads.

        Returns:
            Self, to allow method chaining.

        Raises:
            ValueError: If called on DETACHED object.
            RuntimeError: If called on DELETED object.
            NotFound: If document doesn't exist.

        State Transitions:
            ATTACHED -> LOADED
            LOADED -> LOADED (if force=True)

        Example:
            # Normal fetch
            user = db.doc('users/alovelace')  # ATTACHED
            await user.fetch()  # Now LOADED

            # Transactional fetch
            transaction = db.transaction()
            @firestore.async_transactional
            async def read_user(transaction):
                await user.fetch(transaction=transaction)
                return user.credits
            credits = await read_user(transaction)
        """
        # No-op when already LOADED and force is False.
        if self._should_skip_fetch(force):
            return self

        snapshot = await self._get_snapshot(transaction)
        self._process_snapshot(snapshot, is_async=True)

        return self

    async def save(
        self,
        doc_id: Optional[str] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> 'AsyncFireObject':
        """
        Save the object's data to Firestore asynchronously.

        Args:
            doc_id: Optional custom document ID for DETACHED objects.
            transaction: Optional transaction object for transactional writes.
            batch: Optional batch object for batched writes. If provided,
                  the write will be accumulated in the batch (committed later).

        Returns:
            Self, to allow method chaining.

        Raises:
            RuntimeError: If called on DELETED object.
            ValueError: If DETACHED without parent_collection, or if
                       trying to create a new document within a transaction or batch.

        State Transitions:
            DETACHED -> LOADED (creates new document)
            LOADED -> LOADED (updates if dirty)

        Example:
            # Normal save
            user = collection.new()
            user.name = 'Ada'
            await user.save(doc_id='alovelace')

            # Transactional save
            transaction = db.transaction()
            @firestore.async_transactional
            async def update_user(transaction):
                await user.fetch(transaction=transaction)
                user.credits += 10
                await user.save(transaction=transaction)
            await update_user(transaction)

            # Batch save
            batch = db.batch()
            user1.save(batch=batch)
            user2.save(batch=batch)
            await batch.commit()  # Commit all operations
        """
        self._validate_not_deleted("save()")

        if self._state == State.DETACHED:
            # transaction/batch are passed to _prepare_detached_save (which,
            # per the Raises section, rejects them for new documents) but are
            # deliberately not forwarded to the write itself.
            doc_ref, storage_data = self._prepare_detached_save(doc_id, transaction, batch)
            await self._write_set(storage_data, doc_ref=doc_ref)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        if self._state == State.LOADED:
            # Skip the round-trip entirely when nothing changed.
            if not self.is_dirty():
                return self

            update_dict = self._build_update_dict()
            await self._write_update(update_dict, transaction=transaction, batch=batch)
            self._mark_clean()
            return self

        if self._state == State.ATTACHED:
            # Saving without fetching first overwrites via set().
            storage_data = self._prepare_data_for_storage()
            await self._write_set(storage_data, transaction=transaction, batch=batch)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        return self

    async def collections(self, names_only: bool = False) -> List[Any]:
        """
        List subcollections beneath this document asynchronously.

        Args:
            names_only: When True, return collection IDs instead of wrappers.

        Returns:
            List of subcollection names or AsyncFireCollection wrappers.

        Raises:
            ValueError: If called on a DETACHED object.
            RuntimeError: If called on a DELETED object.
        """
        self._validate_not_detached("collections()")
        self._validate_not_deleted("collections()")

        results: List[Any] = []
        async for subcollection_ref in self._doc_ref.collections():
            if names_only:
                results.append(subcollection_ref.id)
            else:
                results.append(self.collection(subcollection_ref.id))
        return results

    async def delete(
        self,
        batch: Optional[Any] = None,
        *,
        recursive: bool = True,
        batch_size: int = 50,
    ) -> None:
        """
        Delete the document from Firestore asynchronously.

        Args:
            batch: Optional batch object for batched deletes. If provided,
                  the delete will be accumulated in the batch (committed later).
            recursive: When True (default), delete all subcollections first.
            batch_size: Batch size to use for recursive subcollection cleanup.

        Raises:
            ValueError: If called on DETACHED object.
            RuntimeError: If called on DELETED object.
            ValueError: If recursive deletion is requested while using a batch.

        State Transitions:
            ATTACHED -> DELETED
            LOADED -> DELETED

        Example:
            user = db.doc('users/alovelace')
            await user.delete()

            # Batch delete
            batch = db.batch()
            user1.delete(batch=batch, recursive=False)
            user2.delete(batch=batch, recursive=False)
            await batch.commit()  # Commit all operations
        """
        if recursive:
            # Recursive cleanup issues its own commits, so it cannot be
            # deferred into a caller-owned batch.
            if batch is not None:
                raise ValueError("Cannot delete recursively as part of a batch.")
            if batch_size <= 0:
                raise ValueError(f"batch_size must be positive, got {batch_size}")
            await self._delete_descendant_collections(batch_size=batch_size)

        self._prepare_delete()
        await self._write_delete(batch=batch)
        self._transition_to_deleted()

    async def _delete_descendant_collections(self, batch_size: int) -> None:
        """Delete all subcollections beneath this document asynchronously."""
        names = await self.collections(names_only=True)
        for name in names:
            subcollection = self.collection(name)
            await subcollection.delete_all(batch_size=batch_size, recursive=True)

    # =========================================================================
    # Subcollection Utilities
    # =========================================================================

    async def delete_subcollection(
        self,
        name: str,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete a subcollection beneath this document asynchronously.

        Args:
            name: Subcollection name relative to this document.
            batch_size: Maximum number of deletes per commit.
            recursive: Whether to delete nested subcollections.
            dry_run: Count affected documents without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections.
        """
        subcollection = self.collection(name)
        return await subcollection.delete_all(
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
        )

    # =========================================================================
    # Factory Methods
    # =========================================================================

    @classmethod
    def from_snapshot(
        cls,
        snapshot: DocumentSnapshot,
        parent_collection: Optional[Any] = None,
        sync_client: Optional[Any] = None
    ) -> 'AsyncFireObject':
        """
        Create an AsyncFireObject from a DocumentSnapshot.

        Args:
            snapshot: DocumentSnapshot from native async API.
            parent_collection: Optional parent collection reference.
            sync_client: Optional sync Firestore client for async lazy loading.

        Returns:
            AsyncFireObject in LOADED state.

        Raises:
            ValueError: If snapshot doesn't exist.

        Example:
            async for doc in query.stream():
                user = AsyncFireObject.from_snapshot(doc)
        """
        init_data = cls._create_from_snapshot_base(snapshot, parent_collection, sync_client)

        obj = cls(
            doc_ref=init_data['doc_ref'],
            initial_state=init_data['initial_state'],
            parent_collection=init_data['parent_collection'],
            sync_client=sync_client
        )

        # Bypass attribute interception when seeding the data cache.
        object.__setattr__(obj, '_data', init_data['data'])
        # Dirty tracking is already cleared by __init__ and _transition_to_loaded

        return obj

__getattr__(name)

Handle attribute access for document fields with lazy loading.

This method implements lazy loading: if the object is in ATTACHED state, accessing any data attribute will automatically trigger a synchronous fetch to load the data from Firestore using a companion sync client.

This fetch happens once per object - after the first attribute access, the object transitions to LOADED state and subsequent accesses are instant dict lookups.

Args: name: The attribute name being accessed.

Returns: The value of the field from the internal _data cache.

Raises: AttributeError: If the attribute doesn't exist in _data after fetching (if necessary). NotFound: If document doesn't exist in Firestore (during lazy load).

State Transitions: ATTACHED -> LOADED: Automatically fetches data on first access.

Example: user = db.doc('users/alovelace') # ATTACHED name = user.name # Triggers sync fetch, transitions to LOADED year = user.year # No fetch needed, already LOADED

Source code in src/fire_prox/async_fire_object.py
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
def __getattr__(self, name: str) -> Any:
    """
    Handle attribute access for document fields with lazy loading.

    This method implements lazy loading: if the object is in ATTACHED state,
    accessing any data attribute will automatically trigger a synchronous fetch
    to load the data from Firestore using a companion sync client.

    This fetch happens **once per object** - after the first attribute access,
    the object transitions to LOADED state and subsequent accesses are instant
    dict lookups.

    Args:
        name: The attribute name being accessed.

    Returns:
        The value of the field from the internal _data cache.

    Raises:
        AttributeError: If the attribute doesn't exist in _data after
                       fetching (if necessary).
        NotFound: If document doesn't exist in Firestore (during lazy load).

    State Transitions:
        ATTACHED -> LOADED: Automatically fetches data on first access.

    Example:
        user = db.doc('users/alovelace')  # ATTACHED
        name = user.name  # Triggers sync fetch, transitions to LOADED
        year = user.year  # No fetch needed, already LOADED
    """
    if name in self._INTERNAL_ATTRS:
        raise AttributeError(f"Internal attribute {name} not set")

    # If we're in ATTACHED state, trigger lazy loading via sync fetch
    # NOTE(review): reading self._sync_doc_ref here re-enters __getattr__ if
    # the attribute was never set, raising AttributeError instead of being
    # falsy — presumably __init__ always sets it; confirm.
    if self._state == State.ATTACHED and self._sync_doc_ref:
        # Use sync doc ref for lazy loading (synchronous fetch)
        snapshot = self._sync_doc_ref.get()

        if not snapshot.exists:
            raise NotFound(f"Document {self._sync_doc_ref.path} does not exist")

        # Get data and convert special types (DocumentReference → FireObject, etc.)
        data = snapshot.to_dict() or {}
        converted_data = {}
        sync_client = (
            self._sync_doc_ref._client
            if hasattr(self, '_sync_doc_ref') and self._sync_doc_ref
            else None
        )
        for key, value in data.items():
            converted_data[key] = self._convert_snapshot_value_for_retrieval(
                value,
                is_async=True,
                sync_client=sync_client,
            )

        # Transition to LOADED with converted data
        self._transition_to_loaded(converted_data)

    return self._materialize_field(name)

collections(names_only=False) async

List subcollections beneath this document asynchronously.

Args: names_only: When True, return collection IDs instead of wrappers.

Returns: List of subcollection names or AsyncFireCollection wrappers.

Source code in src/fire_prox/async_fire_object.py
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
async def collections(self, names_only: bool = False) -> List[Any]:
    """
    List subcollections beneath this document asynchronously.

    Args:
        names_only: When True, return collection IDs instead of wrappers.

    Returns:
        List of subcollection names or AsyncFireCollection wrappers.

    Raises:
        ValueError: If called on a DETACHED object.
        RuntimeError: If called on a DELETED object.
    """
    self._validate_not_detached("collections()")
    self._validate_not_deleted("collections()")

    results: List[Any] = []
    # Stream subcollection references from the native async client.
    async for subcollection_ref in self._doc_ref.collections():
        if names_only:
            results.append(subcollection_ref.id)
        else:
            results.append(self.collection(subcollection_ref.id))
    return results

delete(batch=None, *, recursive=True, batch_size=50) async

Delete the document from Firestore asynchronously.

Args: batch: Optional batch object for batched deletes. If provided, the delete will be accumulated in the batch (committed later). recursive: When True (default), delete all subcollections first. batch_size: Batch size to use for recursive subcollection cleanup.

Raises: ValueError: If called on DETACHED object. RuntimeError: If called on DELETED object. ValueError: If recursive deletion is requested while using a batch.

State Transitions: ATTACHED -> DELETED LOADED -> DELETED

Example: user = db.doc('users/alovelace') await user.delete()

# Batch delete
batch = db.batch()
user1.delete(batch=batch, recursive=False)
user2.delete(batch=batch, recursive=False)
await batch.commit()  # Commit all operations
Source code in src/fire_prox/async_fire_object.py
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
async def delete(
    self,
    batch: Optional[Any] = None,
    *,
    recursive: bool = True,
    batch_size: int = 50,
) -> None:
    """
    Remove this document from Firestore (async).

    Args:
        batch: Optional write batch. When given, the delete is queued on
            the batch and only executed at batch.commit() time.
        recursive: When True (the default), every subcollection beneath
            the document is purged before the document itself.
        batch_size: Commit chunk size for the recursive cleanup.

    Raises:
        ValueError: If called on a DETACHED object, if recursive deletion
            is combined with a batch, or if batch_size is not positive.
        RuntimeError: If called on a DELETED object.

    State Transitions:
        ATTACHED -> DELETED
        LOADED -> DELETED

    Example:
        user = db.doc('users/alovelace')
        await user.delete()

        # Batch delete (each call must still be awaited to queue the op)
        batch = db.batch()
        await user1.delete(batch=batch, recursive=False)
        await user2.delete(batch=batch, recursive=False)
        await batch.commit()
    """
    if recursive:
        # Recursive cleanup cannot be expressed as a single batch, so the
        # two options are mutually exclusive; validate before any writes.
        if batch is not None:
            raise ValueError("Cannot delete recursively as part of a batch.")
        if batch_size <= 0:
            raise ValueError(f"batch_size must be positive, got {batch_size}")
        await self._delete_descendant_collections(batch_size=batch_size)

    self._prepare_delete()
    await self._write_delete(batch=batch)
    self._transition_to_deleted()

delete_subcollection(name, *, batch_size=50, recursive=True, dry_run=False) async

Delete a subcollection beneath this document asynchronously.

Args: name: Subcollection name relative to this document. batch_size: Maximum number of deletes per commit. recursive: Whether to delete nested subcollections. dry_run: Count affected documents without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections.

Source code in src/fire_prox/async_fire_object.py
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
async def delete_subcollection(
    self,
    name: str,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Remove an entire subcollection of this document (async).

    Args:
        name: Subcollection ID relative to this document.
        batch_size: Maximum number of deletes performed per commit.
        recursive: If True, nested subcollections are removed as well.
        dry_run: If True, only count what would be deleted; no writes.

    Returns:
        Mapping with counts of deleted documents and subcollections.
    """
    # Delegate straight to the collection wrapper's bulk-delete helper.
    return await self.collection(name).delete_all(
        batch_size=batch_size,
        recursive=recursive,
        dry_run=dry_run,
    )

fetch(force=False, transaction=None) async

Fetch document data from Firestore asynchronously.

Args: force: If True, fetch data even if already LOADED. transaction: Optional transaction object for transactional reads.

Returns: Self, to allow method chaining.

Raises: ValueError: If called on DETACHED object. RuntimeError: If called on DELETED object. NotFound: If document doesn't exist.

State Transitions: ATTACHED -> LOADED LOADED -> LOADED (if force=True)

Example: # Normal fetch user = db.doc('users/alovelace') # ATTACHED await user.fetch() # Now LOADED

# Transactional fetch
transaction = db.transaction()
@firestore.async_transactional
async def read_user(transaction):
    await user.fetch(transaction=transaction)
    return user.credits
credits = await read_user(transaction)
Source code in src/fire_prox/async_fire_object.py
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
async def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'AsyncFireObject':
    """
    Load this document's data from Firestore (async).

    Args:
        force: Re-fetch even when the object is already LOADED.
        transaction: Optional transaction for a transactional read.

    Returns:
        This object, enabling call chaining.

    Raises:
        ValueError: If the object is DETACHED.
        RuntimeError: If the object is DELETED.
        NotFound: If no document exists at this reference.

    State Transitions:
        ATTACHED -> LOADED
        LOADED -> LOADED (when force=True)

    Example:
        user = db.doc('users/alovelace')  # ATTACHED
        await user.fetch()  # Now LOADED

        # Transactional fetch
        transaction = db.transaction()
        @firestore.async_transactional
        async def read_user(transaction):
            await user.fetch(transaction=transaction)
            return user.credits
        credits = await read_user(transaction)
    """
    # A fetch is skipped when already LOADED and force is False.
    if not self._should_skip_fetch(force):
        snap = await self._get_snapshot(transaction)
        self._process_snapshot(snap, is_async=True)
    return self

from_snapshot(snapshot, parent_collection=None, sync_client=None) classmethod

Create an AsyncFireObject from a DocumentSnapshot.

Args: snapshot: DocumentSnapshot from native async API. parent_collection: Optional parent collection reference. sync_client: Optional sync Firestore client for async lazy loading.

Returns: AsyncFireObject in LOADED state.

Raises: ValueError: If snapshot doesn't exist.

Example: async for doc in query.stream(): user = AsyncFireObject.from_snapshot(doc)

Source code in src/fire_prox/async_fire_object.py
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
@classmethod
def from_snapshot(
    cls,
    snapshot: DocumentSnapshot,
    parent_collection: Optional[Any] = None,
    sync_client: Optional[Any] = None
) -> 'AsyncFireObject':
    """
    Build a LOADED AsyncFireObject directly from a DocumentSnapshot.

    Args:
        snapshot: DocumentSnapshot produced by the native async API.
        parent_collection: Parent collection reference, if known.
        sync_client: Sync Firestore client enabling async lazy loading.

    Returns:
        An AsyncFireObject already in the LOADED state.

    Raises:
        ValueError: If the snapshot refers to a missing document.

    Example:
        async for doc in query.stream():
            user = AsyncFireObject.from_snapshot(doc)
    """
    base = cls._create_from_snapshot_base(snapshot, parent_collection, sync_client)

    instance = cls(
        doc_ref=base['doc_ref'],
        initial_state=base['initial_state'],
        parent_collection=base['parent_collection'],
        sync_client=sync_client
    )

    # Install the payload via object.__setattr__ so the proxy's own
    # attribute hooks are bypassed; dirty tracking was already reset
    # by __init__ and the LOADED transition.
    object.__setattr__(instance, '_data', base['data'])

    return instance

save(doc_id=None, transaction=None, batch=None) async

Save the object's data to Firestore asynchronously.

Args: doc_id: Optional custom document ID for DETACHED objects. transaction: Optional transaction object for transactional writes. batch: Optional batch object for batched writes. If provided, the write will be accumulated in the batch (committed later).

Returns: Self, to allow method chaining.

Raises: RuntimeError: If called on DELETED object. ValueError: If DETACHED without parent_collection, or if trying to create a new document within a transaction or batch.

State Transitions: DETACHED -> LOADED (creates new document) LOADED -> LOADED (updates if dirty)

Example: # Normal save user = collection.new() user.name = 'Ada' await user.save(doc_id='alovelace')

# Transactional save
transaction = db.transaction()
@firestore.async_transactional
async def update_user(transaction):
    await user.fetch(transaction=transaction)
    user.credits += 10
    await user.save(transaction=transaction)
await update_user(transaction)

# Batch save
batch = db.batch()
await user1.save(batch=batch)
await user2.save(batch=batch)
await batch.commit()  # Commit all operations
Source code in src/fire_prox/async_fire_object.py
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
async def save(
    self,
    doc_id: Optional[str] = None,
    transaction: Optional[Any] = None,
    batch: Optional[Any] = None,
) -> 'AsyncFireObject':
    """
    Persist this object's data to Firestore (async).

    Args:
        doc_id: Custom document ID, honoured only for DETACHED objects.
        transaction: Optional transaction for transactional writes.
        batch: Optional write batch. The write is queued on the batch
            and executed when the batch is committed.

    Returns:
        This object, enabling call chaining.

    Raises:
        RuntimeError: If called on a DELETED object.
        ValueError: If DETACHED without a parent collection, or when a
            new document would be created inside a transaction or batch.

    State Transitions:
        DETACHED -> LOADED (creates a new document)
        LOADED -> LOADED (writes only when dirty)

    Example:
        user = collection.new()
        user.name = 'Ada'
        await user.save(doc_id='alovelace')

        # Transactional save
        transaction = db.transaction()
        @firestore.async_transactional
        async def update_user(transaction):
            await user.fetch(transaction=transaction)
            user.credits += 10
            await user.save(transaction=transaction)
        await update_user(transaction)

        # Batch save (each call must still be awaited to queue the op)
        batch = db.batch()
        await user1.save(batch=batch)
        await user2.save(batch=batch)
        await batch.commit()
    """
    self._validate_not_deleted("save()")

    if self._state == State.DETACHED:
        # First save: allocate a document reference, write everything.
        doc_ref, payload = self._prepare_detached_save(doc_id, transaction, batch)
        await self._write_set(payload, doc_ref=doc_ref)
        object.__setattr__(self, '_state', State.LOADED)
        self._mark_clean()
    elif self._state == State.LOADED:
        # Incremental save: touch Firestore only when something changed.
        if self.is_dirty():
            await self._write_update(
                self._build_update_dict(), transaction=transaction, batch=batch
            )
            self._mark_clean()
    elif self._state == State.ATTACHED:
        # Never fetched: push the full local payload as a set().
        await self._write_set(
            self._prepare_data_for_storage(), transaction=transaction, batch=batch
        )
        object.__setattr__(self, '_state', State.LOADED)
        self._mark_clean()

    return self

AsyncFireProx

Bases: BaseFireProx

Main entry point for the async FireProx library.

AsyncFireProx wraps the native google-cloud-firestore AsyncClient and provides a simplified, Pythonic interface for working with Firestore asynchronously.

Usage Examples: # Initialize with a pre-configured native async client from google.cloud import firestore from fire_prox import AsyncFireProx

native_client = firestore.AsyncClient(project='my-project')
db = AsyncFireProx(native_client)

# Access a document (ATTACHED state)
user = db.doc('users/alovelace')
await user.fetch()
print(user.name)

# Create a new document
users = db.collection('users')
new_user = users.new()
new_user.name = 'Charles Babbage'
new_user.year = 1791
await new_user.save()

# Update a document
user = db.doc('users/alovelace')
await user.fetch()
user.year = 1816
await user.save()

# Delete a document
await user.delete()
Source code in src/fire_prox/async_fireprox.py
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
class AsyncFireProx(BaseFireProx):
    """
    Main entry point for the async FireProx library.

    AsyncFireProx wraps the native google-cloud-firestore AsyncClient and provides
    a simplified, Pythonic interface for working with Firestore asynchronously.

    Usage Examples:
        # Initialize with a pre-configured native async client
        from google.cloud import firestore
        from fire_prox import AsyncFireProx

        native_client = firestore.AsyncClient(project='my-project')
        db = AsyncFireProx(native_client)

        # Access a document (ATTACHED state)
        user = db.doc('users/alovelace')
        await user.fetch()
        print(user.name)

        # Create a new document
        users = db.collection('users')
        new_user = users.new()
        new_user.name = 'Charles Babbage'
        new_user.year = 1791
        await new_user.save()

        # Update a document
        user = db.doc('users/alovelace')
        await user.fetch()
        user.year = 1816
        await user.save()

        # Delete a document
        await user.delete()
    """

    def __init__(self, client: AsyncFirestoreClient):
        """
        Initialize AsyncFireProx with a native async Firestore client.

        Args:
            client: A configured google.cloud.firestore.AsyncClient instance.
                   Authentication and project configuration should be handled
                   before creating this instance.

        Raises:
            TypeError: If client is not a google.cloud.firestore.AsyncClient.

        Example:
            from google.cloud import firestore
            from fire_prox import AsyncFireProx

            # Option 1: Default credentials
            native_client = firestore.AsyncClient()

            # Option 2: Explicit project
            native_client = firestore.AsyncClient(project='my-project-id')

            # Initialize AsyncFireProx
            db = AsyncFireProx(native_client)
        """
        if not isinstance(client, AsyncFirestoreClient):
            raise TypeError(
                f"client must be a google.cloud.firestore.AsyncClient, "
                f"got {type(client)}"
            )

        super().__init__(client)

        # Create companion sync client for lazy loading.
        # Both clients point to the same Firestore backend.
        # NOTE(review): `client._database` is a private attribute of the
        # native client; confirm it remains available across upgrades of
        # google-cloud-firestore.
        from google.cloud import firestore
        self._sync_client = firestore.Client(
            project=client.project,
            database=client._database
        )

    # =========================================================================
    # Document Access
    # =========================================================================

    def doc(self, path: str) -> AsyncFireObject:
        """
        Get a reference to a document by its full path.

        Creates an AsyncFireObject in ATTACHED state. No data is fetched from
        Firestore until fetch() is called or an attribute is accessed (lazy loading).

        Args:
            path: The full document path, e.g., 'users/alovelace' or
                 'users/uid/posts/post123'. Must be a valid Firestore
                 document path with an even number of segments.

        Returns:
            An AsyncFireObject instance in ATTACHED state.

        Raises:
            ValueError: If path has an odd number of segments.

        Example:
            # Root-level document with lazy loading
            user = db.doc('users/alovelace')
            print(user.name)  # Triggers automatic fetch

            # Or explicit fetch
            user = db.doc('users/alovelace')
            await user.fetch()
            print(user.name)

            # Nested document (subcollection)
            post = db.doc('users/alovelace/posts/post123')
            await post.fetch()
        """
        return self._create_document_proxy(path, AsyncFireObject)

    def document(self, path: str) -> AsyncFireObject:
        """
        Alias for doc(). Get a reference to a document by its full path.

        Args:
            path: The full document path.

        Returns:
            An AsyncFireObject instance in ATTACHED state.
        """
        return self.doc(path)

    # =========================================================================
    # Collection Access
    # =========================================================================

    def collection(self, path: str) -> AsyncFireCollection:
        """
        Get a reference to a collection by its path.

        Creates an AsyncFireCollection wrapper around the native
        AsyncCollectionReference.

        Args:
            path: The collection path, e.g., 'users' or 'users/uid/posts'.
                 Must have an odd number of segments.

        Returns:
            An AsyncFireCollection instance.

        Raises:
            ValueError: If path has an even number of segments.

        Example:
            # Root-level collection
            users = db.collection('users')
            new_user = users.new()
            new_user.name = 'Ada'
            await new_user.save()

            # Subcollection
            posts = db.collection('users/alovelace/posts')
            new_post = posts.new()
            new_post.title = 'Analytical Engine'
            await new_post.save()
        """
        return self._create_collection_proxy(path, AsyncFireCollection)

    async def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
        """
        List subcollections beneath the specified document path asynchronously.

        Args:
            path: Document path whose subcollections should be listed.
            names_only: Return collection IDs instead of AsyncFireCollection wrappers.

        Returns:
            List of subcollection names or AsyncFireCollection wrappers.
        """
        document = self.doc(path)
        return await document.collections(names_only=names_only)

    def _get_document_kwargs(self, path: str) -> Dict[str, Any]:
        # Pair every async document proxy with a sync doc ref so attribute
        # access can lazy-load without awaiting.
        sync_doc_ref = self._sync_client.document(path)
        return {'sync_doc_ref': sync_doc_ref, 'sync_client': self._sync_client}

    def _get_collection_kwargs(self, path: str) -> Dict[str, Any]:
        # Collections only need the companion sync client.
        return {'sync_client': self._sync_client}

__init__(client)

Initialize AsyncFireProx with a native async Firestore client.

Args: client: A configured google.cloud.firestore.AsyncClient instance. Authentication and project configuration should be handled before creating this instance.

Raises: TypeError: If client is not a google.cloud.firestore.AsyncClient.

Example: from google.cloud import firestore from fire_prox import AsyncFireProx

# Option 1: Default credentials
native_client = firestore.AsyncClient()

# Option 2: Explicit project
native_client = firestore.AsyncClient(project='my-project-id')

# Initialize AsyncFireProx
db = AsyncFireProx(native_client)
Source code in src/fire_prox/async_fireprox.py
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
def __init__(self, client: AsyncFirestoreClient):
    """
    Initialize AsyncFireProx with a native async Firestore client.

    Args:
        client: A configured google.cloud.firestore.AsyncClient instance.
               Authentication and project configuration should be handled
               before creating this instance.

    Raises:
        TypeError: If client is not a google.cloud.firestore.AsyncClient.

    Example:
        from google.cloud import firestore
        from fire_prox import AsyncFireProx

        # Option 1: Default credentials
        native_client = firestore.AsyncClient()

        # Option 2: Explicit project
        native_client = firestore.AsyncClient(project='my-project-id')

        # Initialize AsyncFireProx
        db = AsyncFireProx(native_client)
    """
    if not isinstance(client, AsyncFirestoreClient):
        raise TypeError(
            f"client must be a google.cloud.firestore.AsyncClient, "
            f"got {type(client)}"
        )

    super().__init__(client)

    # Create companion sync client for lazy loading.
    # Both clients point to the same Firestore backend.
    # NOTE(review): `client._database` is a private attribute of the
    # native client; confirm it remains available across upgrades of
    # google-cloud-firestore.
    from google.cloud import firestore
    self._sync_client = firestore.Client(
        project=client.project,
        database=client._database
    )

collection(path)

Get a reference to a collection by its path.

Creates an AsyncFireCollection wrapper around the native AsyncCollectionReference.

Args: path: The collection path, e.g., 'users' or 'users/uid/posts'. Must have an odd number of segments.

Returns: An AsyncFireCollection instance.

Raises: ValueError: If path has an even number of segments.

Example: # Root-level collection users = db.collection('users') new_user = users.new() new_user.name = 'Ada' await new_user.save()

# Subcollection
posts = db.collection('users/alovelace/posts')
new_post = posts.new()
new_post.title = 'Analytical Engine'
await new_post.save()
Source code in src/fire_prox/async_fireprox.py
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
def collection(self, path: str) -> AsyncFireCollection:
    """
    Obtain an AsyncFireCollection wrapper for a collection path.

    Wraps the native AsyncCollectionReference behind an
    AsyncFireCollection proxy.

    Args:
        path: Collection path such as 'users' or 'users/uid/posts';
            a valid collection path has an odd number of segments.

    Returns:
        An AsyncFireCollection bound to that path.

    Raises:
        ValueError: If the path has an even number of segments.

    Example:
        # Root-level collection
        users = db.collection('users')
        new_user = users.new()
        new_user.name = 'Ada'
        await new_user.save()

        # Subcollection
        posts = db.collection('users/alovelace/posts')
        new_post = posts.new()
        new_post.title = 'Analytical Engine'
        await new_post.save()
    """
    proxy = self._create_collection_proxy(path, AsyncFireCollection)
    return proxy

collections(path, *, names_only=False) async

List subcollections beneath the specified document path asynchronously.

Args: path: Document path whose subcollections should be listed. names_only: Return collection IDs instead of AsyncFireCollection wrappers.

Returns: List of subcollection names or AsyncFireCollection wrappers.

Source code in src/fire_prox/async_fireprox.py
181
182
183
184
185
186
187
188
189
190
191
192
193
async def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
    """
    List the subcollections found under a document path (async).

    Args:
        path: Path of the document whose subcollections are wanted.
        names_only: If True, return plain collection IDs instead of
            AsyncFireCollection wrappers.

    Returns:
        List of subcollection names or AsyncFireCollection wrappers.
    """
    return await self.doc(path).collections(names_only=names_only)

doc(path)

Get a reference to a document by its full path.

Creates an AsyncFireObject in ATTACHED state. No data is fetched from Firestore until fetch() is called or an attribute is accessed (lazy loading).

Args: path: The full document path, e.g., 'users/alovelace' or 'users/uid/posts/post123'. Must be a valid Firestore document path with an even number of segments.

Returns: An AsyncFireObject instance in ATTACHED state.

Raises: ValueError: If path has an odd number of segments.

Example: # Root-level document with lazy loading user = db.doc('users/alovelace') print(user.name) # Triggers automatic fetch

# Or explicit fetch
user = db.doc('users/alovelace')
await user.fetch()
print(user.name)

# Nested document (subcollection)
post = db.doc('users/alovelace/posts/post123')
await post.fetch()
Source code in src/fire_prox/async_fireprox.py
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
def doc(self, path: str) -> AsyncFireObject:
    """
    Obtain an AsyncFireObject proxy for the document at *path*.

    The returned object starts in the ATTACHED state; no data is read
    from Firestore until fetch() is awaited or an attribute access
    triggers lazy loading.

    Args:
        path: Full document path, e.g. 'users/alovelace' or
            'users/uid/posts/post123'; must contain an even number of
            path segments.

    Returns:
        An AsyncFireObject in the ATTACHED state.

    Raises:
        ValueError: If the path has an odd number of segments.

    Example:
        # Root-level document with lazy loading
        user = db.doc('users/alovelace')
        print(user.name)  # Triggers automatic fetch

        # Or explicit fetch
        user = db.doc('users/alovelace')
        await user.fetch()
        print(user.name)

        # Nested document (subcollection)
        post = db.doc('users/alovelace/posts/post123')
        await post.fetch()
    """
    return self._create_document_proxy(path, AsyncFireObject)

document(path)

Alias for doc(). Get a reference to a document by its full path.

Args: path: The full document path.

Returns: An AsyncFireObject instance in ATTACHED state.

Source code in src/fire_prox/async_fireprox.py
133
134
135
136
137
138
139
140
141
142
143
def document(self, path: str) -> AsyncFireObject:
    """
    Convenience alias for doc().

    Args:
        path: The full document path.

    Returns:
        An AsyncFireObject in the ATTACHED state.
    """
    return self.doc(path)

AsyncFireQuery

A chainable query builder for Firestore collections (asynchronous).

AsyncFireQuery wraps the native google-cloud-firestore AsyncQuery object and provides a simplified, chainable interface for building and executing async queries. It follows an immutable pattern - each method returns a new AsyncFireQuery instance with the modified query.

This is the asynchronous implementation. For sync queries, use FireQuery.

Usage Examples: # Basic filtering query = users.where('birth_year', '>', 1800) results = await query.get() for user in results: print(user.name)

# Chaining multiple conditions
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England')
         .order_by('birth_year')
         .limit(10))
async for user in query.stream():
    print(f"{user.name} - {user.birth_year}")

# Async iteration
async for user in users.where('active', '==', True).stream():
    print(user.name)

Design Note: For complex queries beyond the scope of this builder (e.g., OR queries, advanced filtering), use the native AsyncQuery API directly and hydrate results with AsyncFireObject.from_snapshot():

    native_query = client.collection('users').where(...)
    results = [AsyncFireObject.from_snapshot(snap) async for snap in native_query.stream()]
Source code in src/fire_prox/async_fire_query.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
class AsyncFireQuery:
    """
    A chainable query builder for Firestore collections (asynchronous).

    AsyncFireQuery wraps the native google-cloud-firestore AsyncQuery object and
    provides a simplified, chainable interface for building and executing async
    queries. It follows an immutable pattern - each method returns a new
    AsyncFireQuery instance with the modified query.

    This is the asynchronous implementation. For sync queries, use FireQuery.

    Usage Examples:
        # Basic filtering
        query = users.where('birth_year', '>', 1800)
        results = await query.get()
        for user in results:
            print(user.name)

        # Chaining multiple conditions
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England')
                 .order_by('birth_year')
                 .limit(10))
        async for user in query.stream():
            print(f"{user.name} - {user.birth_year}")

        # Async iteration
        async for user in users.where('active', '==', True).stream():
            print(user.name)

    Design Note:
        For complex queries beyond the scope of this builder (e.g., OR queries,
        advanced filtering), use the native AsyncQuery API directly and hydrate
        results with AsyncFireObject.from_snapshot():

            native_query = client.collection('users').where(...)
            results = [AsyncFireObject.from_snapshot(snap) async for snap in native_query.stream()]
    """

    def __init__(
        self,
        native_query: AsyncQuery,
        parent_collection: Optional[Any] = None,
        projection: Optional[tuple] = None,
    ):
        """
        Build an AsyncFireQuery around a native async query.

        Args:
            native_query: The wrapped google-cloud-firestore AsyncQuery instance.
            parent_collection: Optional back-reference to the owning
                AsyncFireCollection; propagated to hydrated results.
            projection: Optional tuple of field paths chosen via .select().
                When set, execution methods produce plain dictionaries
                instead of AsyncFireObject instances.
        """
        # Projection metadata travels with every derived query instance.
        self._projection = projection
        self._parent_collection = parent_collection
        self._query = native_query

    # =========================================================================
    # Query Building Methods (Immutable Pattern)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
        """
        Return a new query with an additional filter condition.

        Follows the immutable builder pattern: the current query is left
        untouched and a fresh AsyncFireQuery carrying the extra filter is
        returned, so calls can be chained freely.

        Args:
            field: Field path to filter on (dot notation allowed, e.g.
                'address.city').
            op: Comparison operator — one of '==', '!=', '<', '<=', '>',
                '>=', 'in', 'not-in', 'array-contains',
                'array-contains-any'.
            value: Value to compare the field against.

        Returns:
            A new AsyncFireQuery including the added filter.

        Example:
            query = users.where('birth_year', '>', 1800)
            query = (users
                     .where('birth_year', '>', 1800)
                     .where('country', '==', 'England'))
        """
        # Wrap the condition in a FieldFilter and derive a new native query.
        filtered = self._query.where(filter=FieldFilter(field, op, value))
        return AsyncFireQuery(filtered, self._parent_collection, self._projection)

    def order_by(self, field: str, direction: str = 'ASCENDING') -> 'AsyncFireQuery':
        """
        Return a new query ordered by the specified field.

        Args:
            field: The field path to order by.
            direction: Sort direction, either 'ASCENDING' or 'DESCENDING'
                (case-insensitive). Default is 'ASCENDING'.

        Returns:
            A new AsyncFireQuery instance with the ordering applied.

        Raises:
            ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.

        Example:
            # Ascending order
            query = users.order_by('birth_year')

            # Descending order
            query = users.order_by('birth_year', direction='DESCENDING')

            # Multiple orderings (chained)
            query = (users
                     .order_by('country')
                     .order_by('birth_year', direction='DESCENDING'))
        """
        # Single import (the original duplicated it in both branches);
        # normalize the direction string once.
        from google.cloud.firestore_v1 import Query as QueryClass

        normalized = direction.upper()
        if normalized == 'ASCENDING':
            direction_const = QueryClass.ASCENDING
        elif normalized == 'DESCENDING':
            direction_const = QueryClass.DESCENDING
        else:
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        new_query = self._query.order_by(field, direction=direction_const)
        return AsyncFireQuery(new_query, self._parent_collection, self._projection)

    def limit(self, count: int) -> 'AsyncFireQuery':
        """
        Return a new query capped at `count` results.

        Args:
            count: Maximum number of documents to return; must be positive.

        Returns:
            A new AsyncFireQuery instance with the limit applied.

        Raises:
            ValueError: If count is zero or negative.

        Example:
            # Top 10 by score
            query = users.order_by('score', direction='DESCENDING').limit(10)

            # First 5 matches
            query = users.where('active', '==', True).limit(5)
        """
        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")

        return AsyncFireQuery(
            self._query.limit(count), self._parent_collection, self._projection
        )

    def start_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        Return a new query that starts at a cursor position (inclusive).

        The cursor document (or field values) is included in the results.

        Args:
            *document_fields_or_snapshot: One of:
                - A dict of field values, e.g. {'field': value}
                - A DocumentSnapshot from a previous query
                - Positional field values matching the order_by clause order

        Returns:
            A new AsyncFireQuery instance with the start cursor applied.

        Example:
            # Field-value cursor (requires matching order_by)
            query = users.order_by('age').start_at({'age': 25})

            # Pagination from the last document of the previous page
            page1 = await users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = await users.order_by('age').start_at({'age': last_age}).limit(10).get()
        """
        return AsyncFireQuery(
            self._query.start_at(*document_fields_or_snapshot),
            self._parent_collection,
            self._projection,
        )

    def start_after(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        Return a new query that starts after a cursor position (exclusive).

        The cursor document itself is excluded — the usual choice for
        pagination, since it avoids repeating the last document of the
        previous page.

        Args:
            *document_fields_or_snapshot: One of:
                - A dict of field values, e.g. {'field': value}
                - A DocumentSnapshot from a previous query
                - Positional field values matching the order_by clause order

        Returns:
            A new AsyncFireQuery instance with the start-after cursor applied.

        Example:
            page1 = await users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = await users.order_by('age').start_after({'age': last_age}).limit(10).get()
        """
        return AsyncFireQuery(
            self._query.start_after(*document_fields_or_snapshot),
            self._parent_collection,
            self._projection,
        )

    def end_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        Return a new query that ends at a cursor position (inclusive).

        The cursor document is included in the results.

        Args:
            *document_fields_or_snapshot: One of:
                - A dict of field values, e.g. {'field': value}
                - A DocumentSnapshot
                - Positional field values matching the order_by clause order

        Returns:
            A new AsyncFireQuery instance with the end cursor applied.

        Example:
            # All users up to and including age 50
            query = users.order_by('age').end_at({'age': 50})
        """
        return AsyncFireQuery(
            self._query.end_at(*document_fields_or_snapshot),
            self._parent_collection,
            self._projection,
        )

    def end_before(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        Return a new query that ends before a cursor position (exclusive).

        The cursor document itself is excluded from the results.

        Args:
            *document_fields_or_snapshot: One of:
                - A dict of field values, e.g. {'field': value}
                - A DocumentSnapshot
                - Positional field values matching the order_by clause order

        Returns:
            A new AsyncFireQuery instance with the end-before cursor applied.

        Example:
            # All users before age 50 (50 itself excluded)
            query = users.order_by('age').end_before({'age': 50})
        """
        return AsyncFireQuery(
            self._query.end_before(*document_fields_or_snapshot),
            self._parent_collection,
            self._projection,
        )

    def select(self, *field_paths: str) -> 'AsyncFireQuery':
        """
        Select specific fields to return (projection).

        Creates a new AsyncFireQuery that only returns the specified fields in the
        query results. When using projections, query results will be returned
        as vanilla dictionaries instead of AsyncFireObject instances. Any
        DocumentReferences in the returned dictionaries will be automatically
        converted to AsyncFireObject instances in ATTACHED state.

        Args:
            *field_paths: One or more field paths to select. Field paths can
                         include nested fields using dot notation (e.g., 'address.city').

        Returns:
            A new AsyncFireQuery instance with the projection applied.

        Raises:
            ValueError: If no field paths are provided.

        Example:
            # Select a single field
            query = users.select('name')
            results = await query.get()
            # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

            # Select multiple fields
            query = users.select('name', 'email', 'birth_year')
            results = await query.get()
            # Returns: [{'name': 'Alice', 'email': 'alice@example.com', 'birth_year': 1990}, ...]

            # Select with filtering and ordering
            query = (users
                     .where('birth_year', '>', 1990)
                     .select('name', 'birth_year')
                     .order_by('birth_year')
                     .limit(10))

            # DocumentReferences are auto-converted to AsyncFireObjects
            query = posts.select('title', 'author')  # author is a DocumentReference
            results = await query.get()
            # results[0]['author'] is an AsyncFireObject, not a DocumentReference
            await results[0]['author'].fetch()
            print(results[0]['author'].name)

        Note:
            - Projection queries return dictionaries, not AsyncFireObject instances
            - Only the selected fields will be present in the returned dictionaries
            - DocumentReferences are automatically hydrated to AsyncFireObject instances
            - Projected results are more bandwidth-efficient for large documents
        """
        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # Create new query with projection
        new_query = self._query.select(list(field_paths))
        return AsyncFireQuery(new_query, self._parent_collection, projection=field_paths)

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'AsyncFireQuery':
        """
        Return a new query performing a vector similarity search.

        The vector search is layered on top of any filters already applied
        to this query, allowing pre-filtered nearest-neighbor lookups
        (a composite index is required when combined with where()).

        Args:
            vector_field: Name of the field holding the vector embeddings.
            query_vector: Vector to compare against
                (google.cloud.firestore_v1.vector.Vector).
            distance_measure: Distance metric — DistanceMeasure.EUCLIDEAN,
                DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT.
            limit: Maximum number of nearest neighbors to return (max 1000).
            distance_result_field: Optional result field in which to store
                the computed distance.

        Returns:
            A new AsyncFireQuery instance with the vector search applied.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            query = (collection
                     .where('category', '==', 'tech')
                     .find_nearest(
                         vector_field="embedding",
                         query_vector=Vector([0.1, 0.2, 0.3]),
                         distance_measure=DistanceMeasure.COSINE,
                         limit=5
                     ))

        Note:
            - Requires a composite index when combined with where() clauses.
            - Maximum limit is 1000 documents.
            - Not supported by the Firestore emulator (production only).
        """
        # Delegate to the native AsyncQuery vector-search builder.
        nearest = self._query.find_nearest(
            vector_field=vector_field,
            query_vector=query_vector,
            distance_measure=distance_measure,
            limit=limit,
            distance_result_field=distance_result_field,
        )
        return AsyncFireQuery(nearest, self._parent_collection, self._projection)

    # =========================================================================
    # Aggregation Methods
    # =========================================================================

    async def count(self) -> int:
        """
        Count documents matching the query via a server-side aggregation.

        Uses Firestore's aggregation API, so the matching documents are
        never fetched — only the count travels over the wire.

        Returns:
            Number of matching documents; 0 when nothing matches.

        Example:
            total_users = await users.count()
            active_users = await users.where('active', '==', True).count()

        Note:
            Still billed as one document read per 1000 documents matched,
            but far cheaper than streaming the documents themselves.
        """
        agg_query = self._query.count(alias='count')

        rows = await agg_query.get()
        if rows:
            # One row, one aggregation: value sits at [0][0].
            return rows[0][0].value
        return 0

    async def sum(self, field: str) -> Union[int, float]:
        """
        Sum a numeric field across matching documents via aggregation.

        Executed server-side — the documents themselves are never fetched.
        The field must hold numeric values (int or float); null values are
        ignored by Firestore.

        Args:
            field: Name of the numeric field to sum.

        Returns:
            Sum of the field values over all matching documents, or 0 when
            nothing matches (or every value is null).

        Raises:
            ValueError: If field is None or empty.

        Example:
            total_salary = await employees.sum('salary')
            eng_salary = await (employees
                                .where('department', '==', 'Engineering')
                                .sum('salary'))
        """
        if not field:
            raise ValueError("sum() requires a field name")

        rows = await self._query.sum(field, alias='sum').get()
        if rows:
            # One row, one aggregation: value sits at [0][0].
            return rows[0][0].value
        return 0

    async def avg(self, field: str) -> float:
        """
        Average a numeric field across matching documents via aggregation.

        Executed server-side — the documents themselves are never fetched.
        The field must hold numeric values (int or float); null values are
        ignored by Firestore.

        Args:
            field: Name of the numeric field to average.

        Returns:
            Arithmetic mean of the field values over all matching
            documents, or 0.0 when nothing matches (or every value is
            null).

        Raises:
            ValueError: If field is None or empty.

        Example:
            avg_age = await users.avg('age')
            avg_salary = await (employees
                                .where('department', '==', 'Engineering')
                                .avg('salary'))
        """
        if not field:
            raise ValueError("avg() requires a field name")

        rows = await self._query.avg(field, alias='avg').get()
        if rows:
            # One row, one aggregation; Firestore reports None for an
            # empty/all-null input, which we normalize to 0.0.
            value = rows[0][0].value
            return 0.0 if value is None else value
        return 0.0

    async def aggregate(self, **aggregations) -> Dict[str, Any]:
        """
        Perform multiple aggregations in a single query.

        Executes one aggregation query combining several operations
        (count, sum, average) without fetching the actual documents —
        more efficient than running separate aggregation queries, since
        everything happens in a single round-trip to Firestore.

        Args:
            **aggregations: Named aggregations using Count(), Sum(field),
                or Avg(field) from the fire_prox.aggregation module.

        Returns:
            Dictionary mapping each aggregation name to its result.
            Null results are normalized to 0.

        Raises:
            ValueError: If no aggregations are provided, if a Sum/Avg is
                missing its field name, or if an invalid aggregation type
                is used.

        Example:
            from fire_prox.aggregation import Count, Sum, Avg

            stats = await employees.aggregate(
                total_count=Count(),
                total_salary=Sum('salary'),
                avg_salary=Avg('salary'),
            )
            # Returns: {'total_count': 150, 'total_salary': 15000000,
            #           'avg_salary': 100000.0}

            # With filters
            eng_stats = await (employees
                              .where('department', '==', 'Engineering')
                              .aggregate(count=Count(), total_salary=Sum('salary')))

        Note:
            - Null values are ignored in sum and average calculations.
        """
        if not aggregations:
            raise ValueError("aggregate() requires at least one aggregation")

        from .aggregation import Avg, Count, Sum

        def _apply(target, alias, agg_type):
            # Attach one aggregation to `target` (the base query for the
            # first aggregation, the aggregation query thereafter) and
            # return the resulting aggregation query.
            if isinstance(agg_type, Count):
                return target.count(alias=alias)
            if isinstance(agg_type, Sum):
                if not agg_type.field:
                    raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
                return target.sum(agg_type.field, alias=alias)
            if isinstance(agg_type, Avg):
                if not agg_type.field:
                    raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
                return target.avg(agg_type.field, alias=alias)
            raise ValueError(
                f"Invalid aggregation type for '{alias}': {type(agg_type).__name__}. "
                f"Use Count(), Sum(field), or Avg(field)"
            )

        # The first aggregation is applied to the base query (creating the
        # AsyncAggregationQuery); subsequent ones chain onto it. A single
        # loop replaces the duplicated first/rest dispatch.
        agg_query = None
        for alias, agg_type in aggregations.items():
            target = self._query if agg_query is None else agg_query
            agg_query = _apply(target, alias, agg_type)

        # Execute once and collect every aggregation result by alias.
        results_dict = {}
        result = await agg_query.get()

        if result and len(result) > 0:
            for agg_result in result:
                for agg in agg_result:
                    value = agg.value
                    # Convert None to 0 for consistency
                    results_dict[agg.alias] = value if value is not None else 0

        return results_dict

    # =========================================================================
    # Helper Methods
    # =========================================================================

    def _convert_projection_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Convert DocumentReferences in projection data to AsyncFireObjects.

        Walks a projection result dictionary and replaces every
        DocumentReference / AsyncDocumentReference with an AsyncFireObject
        in ATTACHED state, so callers can keep working through the
        FireProx API. Nested dictionaries are processed recursively;
        references inside lists are wrapped and dicts inside lists are
        recursed, while other list items pass through unchanged.

        Args:
            data: Projection data dictionary returned by Firestore.

        Returns:
            A new dictionary with all references converted.
        """
        from .state import State

        def _attach(ref):
            # Wrap a native reference as an ATTACHED AsyncFireObject.
            return AsyncFireObject(
                doc_ref=ref,
                initial_state=State.ATTACHED,
                parent_collection=self._parent_collection,
            )

        converted = {}
        for field, value in data.items():
            if isinstance(value, (DocumentReference, AsyncDocumentReference)):
                converted[field] = _attach(value)
            elif isinstance(value, list):
                items = []
                for item in value:
                    if isinstance(item, (DocumentReference, AsyncDocumentReference)):
                        items.append(_attach(item))
                    elif isinstance(item, dict):
                        items.append(self._convert_projection_data(item))
                    else:
                        items.append(item)
                converted[field] = items
            elif isinstance(value, dict):
                converted[field] = self._convert_projection_data(value)
            else:
                converted[field] = value
        return converted

    # =========================================================================
    # Query Execution Methods
    # =========================================================================

    async def get(self) -> Union[List[AsyncFireObject], List[Dict[str, Any]]]:
        """
        Execute the query and return all results as a list.

        Streams every matching document and materializes the results.
        Without a projection, each document is hydrated into an
        AsyncFireObject in LOADED state; with a projection active (via
        .select()), plain dictionaries of the selected fields are returned
        instead, with DocumentReferences converted to AsyncFireObjects.

        Returns:
            - Without projection: list of AsyncFireObject instances.
            - With projection: list of dictionaries holding only the
              selected fields.
            - Empty list when nothing matches.

        Example:
            users = await query.get()
            for user in users:
                print(f"{user.name}: {user.birth_year}")

            # Projected results
            rows = await query.select('name', 'email').get()
            for row in rows:
                print(f"{row['name']}: {row['email']}")
        """
        # Projection active: collect dictionaries with references converted.
        if self._projection:
            return [
                self._convert_projection_data(snap.to_dict())
                async for snap in self._query.stream()
            ]

        # Default path: hydrate each snapshot into an AsyncFireObject.
        return [
            AsyncFireObject.from_snapshot(snap, self._parent_collection)
            async for snap in self._query.stream()
        ]

    async def stream(self) -> AsyncIterator[Union[AsyncFireObject, Dict[str, Any]]]:
        """
        Execute the query and stream results as an async iterator.

        This is an async generator: it yields results one at a time, which
        is more memory-efficient than .get() for large result sets since
        nothing is buffered. If a projection is active (via .select()),
        vanilla dictionaries are yielded instead of AsyncFireObject
        instances.

        Yields:
            - If no projection: AsyncFireObject instances in LOADED state
              for each matching document.
            - If projection active: dictionaries containing only the
              selected fields, with DocumentReferences converted to
              AsyncFireObjects.

        Example:
            # Stream results one at a time as AsyncFireObjects
            async for user in query.stream():
                print(f"{user.name}: {user.birth_year}")

            # Stream projected results as dictionaries
            async for user_dict in query.select('name', 'email').stream():
                print(f"{user_dict['name']}: {user_dict['email']}")

        Note:
            The original annotation Union[AsyncIterator[...], AsyncIterator[...]]
            was incorrect for an async generator; the function itself is the
            iterator, yielding either type, hence AsyncIterator[Union[...]].
        """
        if self._projection:
            # Projection active: yield converted dictionaries.
            async for snapshot in self._query.stream():
                data = snapshot.to_dict()
                yield self._convert_projection_data(data)
        else:
            # Default path: yield hydrated AsyncFireObjects.
            async for snapshot in self._query.stream():
                yield AsyncFireObject.from_snapshot(snapshot, self._parent_collection)

    # =========================================================================
    # Real-Time Listeners (Sync-only via sync_client)
    # =========================================================================

    def on_snapshot(self, callback: Any) -> Any:
        """
        Listen for real-time updates to this query.

        This method sets up a real-time listener that fires the callback
        whenever any document matching the query changes. The listener runs
        on a separate thread managed by the Firestore SDK.

        NOTE(review): this implementation delegates directly to
        ``self._query.on_snapshot()`` on the wrapped native query object.
        Real-time listeners are a sync-only Firestore feature, so the wrapped
        query must expose a working ``on_snapshot`` (typically backed by a
        synchronous client under the hood) — confirm against the SDK version
        in use before relying on this from an async context.

        Args:
            callback: Callback function invoked on query changes.
                     Signature: callback(query_snapshot, changes, read_time)
                     - query_snapshot: List of DocumentSnapshot objects matching the query
                     - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED)
                     - read_time: Timestamp of the snapshot

        Returns:
            Watch object with an `.unsubscribe()` method to stop listening.

        Example:
            import threading

            callback_done = threading.Event()

            def on_change(query_snapshot, changes, read_time):
                for change in changes:
                    if change.type.name == 'ADDED':
                        print(f"New: {change.document.id}")
                    elif change.type.name == 'MODIFIED':
                        print(f"Modified: {change.document.id}")
                    elif change.type.name == 'REMOVED':
                        print(f"Removed: {change.document.id}")
                callback_done.set()

            # Listen to active users only (async query)
            active_users = users.where('status', '==', 'active')
            watch = active_users.on_snapshot(on_change)

            # Wait for initial snapshot
            callback_done.wait()

            # Later: stop listening
            watch.unsubscribe()

        Note:
            The callback runs on a separate thread. Use threading primitives
            (Event, Lock, Queue) for synchronization with your main thread.
        """
        # Delegate to the wrapped query's on_snapshot method; the Firestore
        # SDK handles the background-thread plumbing internally.
        return self._query.on_snapshot(callback)

    def __repr__(self) -> str:
        """Developer-facing representation including the wrapped native query."""
        return "<AsyncFireQuery query={}>".format(self._query)

    def __str__(self) -> str:
        """Human-readable form naming the wrapped native query."""
        return "AsyncFireQuery({})".format(self._query)

__init__(native_query, parent_collection=None, projection=None)

Initialize an AsyncFireQuery.

Args: native_query: The underlying native AsyncQuery object from google-cloud-firestore. parent_collection: Optional reference to parent AsyncFireCollection. projection: Optional tuple of field paths to project (select specific fields).

Source code in src/fire_prox/async_fire_query.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
def __init__(
    self,
    native_query: AsyncQuery,
    parent_collection: Optional[Any] = None,
    projection: Optional[tuple] = None,
):
    """Initialize an AsyncFireQuery wrapper.

    Args:
        native_query: The underlying native AsyncQuery object from google-cloud-firestore.
        parent_collection: Optional reference to parent AsyncFireCollection.
        projection: Optional tuple of field paths to project (select specific fields).
    """
    # Store the building blocks; chaining methods (where/limit/...) always
    # produces a fresh AsyncFireQuery rather than mutating this instance.
    self._projection = projection
    self._parent_collection = parent_collection
    self._query = native_query

__repr__()

Return string representation of the query.

Source code in src/fire_prox/async_fire_query.py
903
904
905
def __repr__(self) -> str:
    """Debug representation exposing the wrapped native query."""
    return "<AsyncFireQuery query=" + str(self._query) + ">"

__str__()

Return human-readable string representation.

Source code in src/fire_prox/async_fire_query.py
907
908
909
def __str__(self) -> str:
    """Readable representation naming the wrapped native query."""
    return "AsyncFireQuery(" + str(self._query) + ")"

aggregate(**aggregations) async

Perform multiple aggregations in a single query.

Executes an aggregation query with multiple aggregation operations (count, sum, average) without fetching the actual documents. This is more efficient than running multiple separate aggregation queries.

Args: **aggregations: Named aggregations using Count(), Sum(field), or Avg(field) from fire_prox.aggregation module.

Returns: Dictionary mapping aggregation names to their results.

Raises: ValueError: If no aggregations are provided or if invalid aggregation types are used.

Example: from fire_prox.aggregation import Count, Sum, Avg

# Multiple aggregations in one query
stats = await employees.aggregate(
    total_count=Count(),
    total_salary=Sum('salary'),
    avg_salary=Avg('salary'),
    avg_age=Avg('age')
)
# Returns: {
#     'total_count': 150,
#     'total_salary': 15000000,
#     'avg_salary': 100000.0,
#     'avg_age': 35.2
# }

# With filters
eng_stats = await (employees
                  .where('department', '==', 'Engineering')
                  .aggregate(
                      count=Count(),
                      total_salary=Sum('salary')
                  ))
# Returns: {'count': 50, 'total_salary': 5000000}

# Financial dashboard
financials = await (transactions
                   .where('date', '>=', start_date)
                   .aggregate(
                       total_transactions=Count(),
                       total_revenue=Sum('amount'),
                       avg_transaction=Avg('amount')
                   ))

Note: - Much more efficient than multiple separate aggregation queries - All aggregations execute in a single round-trip to Firestore - Null values are ignored in sum and average calculations

Source code in src/fire_prox/async_fire_query.py
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
async def aggregate(self, **aggregations) -> Dict[str, Any]:
    """
    Perform multiple aggregations in a single query.

    Executes an aggregation query with multiple aggregation operations
    (count, sum, average) without fetching the actual documents. This is
    more efficient than running multiple separate aggregation queries.

    Args:
        **aggregations: Named aggregations using Count(), Sum(field), or
                      Avg(field) from fire_prox.aggregation module.

    Returns:
        Dictionary mapping aggregation names to their results.

    Raises:
        ValueError: If no aggregations are provided or if invalid
                   aggregation types are used.

    Example:
        from fire_prox.aggregation import Count, Sum, Avg

        # Multiple aggregations in one query
        stats = await employees.aggregate(
            total_count=Count(),
            total_salary=Sum('salary'),
            avg_salary=Avg('salary'),
        )
        # Returns: {'total_count': 150, 'total_salary': 15000000,
        #           'avg_salary': 100000.0}

        # With filters
        eng_stats = await (employees
                          .where('department', '==', 'Engineering')
                          .aggregate(count=Count(), total_salary=Sum('salary')))

    Note:
        - Much more efficient than multiple separate aggregation queries
        - All aggregations execute in a single round-trip to Firestore
        - Null values are ignored in sum and average calculations
    """
    if not aggregations:
        raise ValueError("aggregate() requires at least one aggregation")

    from .aggregation import Avg, Count, Sum

    def _attach(target: Any, alias: str, spec: Any) -> Any:
        """Append one aggregation to `target` and return the resulting query.

        `target` is the native query for the first aggregation (which produces
        an AsyncAggregationQuery) and the aggregation query for the rest; both
        expose the same count/sum/avg chaining methods, so one helper serves
        every position in the chain.
        """
        if isinstance(spec, Count):
            return target.count(alias=alias)
        if isinstance(spec, Sum):
            if not spec.field:
                raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
            return target.sum(spec.field, alias=alias)
        if isinstance(spec, Avg):
            if not spec.field:
                raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
            return target.avg(spec.field, alias=alias)
        raise ValueError(
            f"Invalid aggregation type for '{alias}': {type(spec).__name__}. "
            f"Use Count(), Sum(field), or Avg(field)"
        )

    # Build the chain in kwargs order: the first _attach call converts the
    # native query into an aggregation query; later calls extend it.
    agg_query = self._query
    for alias, spec in aggregations.items():
        agg_query = _attach(agg_query, alias, spec)

    # Execute once and collect every aliased result (await the async get).
    results_dict: Dict[str, Any] = {}
    result = await agg_query.get()
    if result:
        for agg_result in result:
            for agg in agg_result:
                # Convert None to 0 for consistency.
                results_dict[agg.alias] = agg.value if agg.value is not None else 0

    return results_dict

avg(field) async

Average a numeric field across all matching documents.

Executes an aggregation query to calculate the arithmetic mean of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to average.

Returns: Average of the field values across all matching documents. Returns 0.0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Average age of all users avg_age = await users.avg('age') # Returns: 32.5

# Average with filters
avg_salary = await (employees
                   .where('department', '==', 'Engineering')
                   .avg('salary'))
# Returns: 125000.0

# Average rating for active products
avg_rating = await (products
                   .where('active', '==', True)
                   .avg('rating'))
# Returns: 4.2

Note: - Null values are ignored in the average calculation - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/async_fire_query.py
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
async def avg(self, field: str) -> float:
    """
    Return the arithmetic mean of a numeric field across matching documents.

    Runs a server-side aggregation, so no documents are fetched. The field
    must contain numeric values (int or float); null values are ignored by
    the aggregation.

    Args:
        field: Name of the numeric field to average.

    Returns:
        Average of the field values across all matching documents, or 0.0
        if no documents match or all values are null.

    Raises:
        ValueError: If field is None or empty.

    Example:
        # Average age of all users
        avg_age = await users.avg('age')

        # Average with filters
        avg_salary = await (employees
                           .where('department', '==', 'Engineering')
                           .avg('salary'))

    Note:
        - Null values are ignored in the average calculation
        - Non-numeric values will cause an error
        - This is more efficient than fetching all documents
    """
    if not field:
        raise ValueError("avg() requires a field name")

    # Build the single-alias native aggregation query and await its result.
    response = await self._query.avg(field, alias='avg').get()
    if response:
        for row in response:
            # Only one aggregation was requested, so read the first entry.
            mean = row[0].value
            return 0.0 if mean is None else mean
    return 0.0

count() async

Count documents matching the query.

Executes an aggregation query to count the number of documents that match the current query filters without fetching the actual documents. This is more efficient than fetching all documents and counting them.

Returns: Integer count of matching documents. Returns 0 if no documents match.

Example: # Count all users total_users = await users.count() # Returns: 150

# Count with filters
active_users = await users.where('active', '==', True).count()
# Returns: 42

# Count with complex query
count = await (users
              .where('age', '>', 25)
              .where('country', '==', 'USA')
              .count())
# Returns: 37

Note: This uses Firestore's native aggregation API, which is more efficient than fetching documents. However, it is still billed as one document read per batch of up to 1,000 index entries matched by the query.

Source code in src/fire_prox/async_fire_query.py
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
async def count(self) -> int:
    """
    Return the number of documents matching the query.

    Uses Firestore's server-side count aggregation, so matching documents
    are never downloaded. This is more efficient than fetching all
    documents and counting them client-side.

    Returns:
        Integer count of matching documents; 0 if no documents match.

    Example:
        # Count all users
        total_users = await users.count()

        # Count with filters
        active_users = await users.where('active', '==', True).count()

    Note:
        This uses Firestore's native aggregation API, which is more efficient
        than fetching documents, though aggregation reads are still billed
        (see Firestore pricing for the exact per-index-entry accounting).
    """
    # Single-alias aggregation: ask the server for the count only, then await.
    response = await self._query.count(alias='count').get()
    if response:
        for row in response:
            # Only one aggregation was requested, so read the first entry.
            return row[0].value
    return 0

end_at(*document_fields_or_snapshot)

End query results at a cursor position (inclusive).

Creates a new AsyncFireQuery that ends at the specified cursor. The cursor document is included in the results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the end cursor applied.

Example: # Get all users up to and including age 50 query = users.order_by('age').end_at({'age': 50})

# Using a specific document as endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = await target_doc_ref.get()
query = users.order_by('age').end_at(target_snapshot)
Source code in src/fire_prox/async_fire_query.py
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
def end_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Bound the result set at a cursor position, inclusively.

    The document identified by the cursor is the last one included in the
    results. Queries are immutable, so a new AsyncFireQuery is returned.

    Args:
        *document_fields_or_snapshot: Either:
            - A dictionary of field values: {'field': value}
            - A DocumentSnapshot
            - Direct field values matching order_by clause order

    Returns:
        A new AsyncFireQuery instance with the end cursor applied.

    Example:
        # Get all users up to and including age 50
        query = users.order_by('age').end_at({'age': 50})

        # Using a specific document as endpoint
        target_snapshot = await users.doc('user123')._doc_ref.get()
        query = users.order_by('age').end_at(target_snapshot)
    """
    # Derive a bounded native query and wrap it in a fresh proxy.
    bounded = self._query.end_at(*document_fields_or_snapshot)
    return AsyncFireQuery(bounded, self._parent_collection, self._projection)

end_before(*document_fields_or_snapshot)

End query results before a cursor position (exclusive).

Creates a new AsyncFireQuery that ends before the specified cursor. The cursor document itself is excluded from results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the end-before cursor applied.

Example: # Get all users before age 50 (exclude 50) query = users.order_by('age').end_before({'age': 50})

# Using a specific document as exclusive endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = await target_doc_ref.get()
query = users.order_by('age').end_before(target_snapshot)
Source code in src/fire_prox/async_fire_query.py
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
def end_before(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Bound the result set before a cursor position, exclusively.

    The document identified by the cursor is excluded from the results.
    Queries are immutable, so a new AsyncFireQuery is returned.

    Args:
        *document_fields_or_snapshot: Either:
            - A dictionary of field values: {'field': value}
            - A DocumentSnapshot
            - Direct field values matching order_by clause order

    Returns:
        A new AsyncFireQuery instance with the end-before cursor applied.

    Example:
        # Get all users before age 50 (exclude 50)
        query = users.order_by('age').end_before({'age': 50})

        # Using a specific document as exclusive endpoint
        target_snapshot = await users.doc('user123')._doc_ref.get()
        query = users.order_by('age').end_before(target_snapshot)
    """
    # Derive a bounded native query and wrap it in a fresh proxy.
    bounded = self._query.end_before(*document_fields_or_snapshot)
    return AsyncFireQuery(bounded, self._parent_collection, self._projection)

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search on top of the current query filters. This allows you to combine pre-filtering with vector search (requires a composite index).

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: A new AsyncFireQuery instance with the vector search applied.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

# Find nearest neighbors with pre-filtering
query = (collection
         .where('category', '==', 'tech')
         .find_nearest(
             vector_field="embedding",
             query_vector=Vector([0.1, 0.2, 0.3]),
             distance_measure=DistanceMeasure.COSINE,
             limit=5
         ))
async for doc in query.stream():
    print(f"{doc.title}: {doc.category}")

Note: - Requires a composite index when combining with where() clauses - Maximum limit is 1000 documents - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/async_fire_query.py
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'AsyncFireQuery':
    """
    Run a vector similarity (nearest-neighbor) search over this query.

    The search is layered on top of any filters already applied, allowing
    pre-filtering combined with vector search (a composite index is
    required when combining with where() clauses).

    Args:
        vector_field: Name of the field containing vector embeddings.
        query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
        distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
            DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
        limit: Maximum number of nearest neighbors to return (max 1000).
        distance_result_field: Optional field name to store the calculated
            distance in the query results.

    Returns:
        A new AsyncFireQuery instance with the vector search applied.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        query = (collection
                 .where('category', '==', 'tech')
                 .find_nearest(
                     vector_field="embedding",
                     query_vector=Vector([0.1, 0.2, 0.3]),
                     distance_measure=DistanceMeasure.COSINE,
                     limit=5
                 ))
        async for doc in query.stream():
            print(f"{doc.title}: {doc.category}")

    Note:
        - Requires a composite index when combining with where() clauses
        - Maximum limit is 1000 documents
        - Does not work with Firestore emulator (production only)
    """
    # Delegate to the native vector-search API, then wrap the result.
    vector_query = self._query.find_nearest(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return AsyncFireQuery(vector_query, self._parent_collection, self._projection)

get() async

Execute the query and return results as a list.

Fetches all matching documents asynchronously and hydrates them into AsyncFireObject instances in LOADED state. If a projection is active (via .select()), returns vanilla dictionaries instead of AsyncFireObject instances.

Returns: - If no projection: List of AsyncFireObject instances for all documents matching the query. - If projection active: List of dictionaries containing only the selected fields. DocumentReferences are converted to AsyncFireObjects. - Empty list if no documents match.

Example: # Get all results as AsyncFireObjects users = await query.get() for user in users: print(f"{user.name}: {user.birth_year}")

# Get projected results as dictionaries
users = await query.select('name', 'email').get()
for user_dict in users:
    print(f"{user_dict['name']}: {user_dict['email']}")

# Check if results exist
results = await query.get()
if results:
    print(f"Found {len(results)} users")
else:
    print("No users found")
Source code in src/fire_prox/async_fire_query.py
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
async def get(self) -> Union[List[AsyncFireObject], List[Dict[str, Any]]]:
    """
    Execute the query and collect every result into a list.

    Without a projection, each matching document is hydrated into an
    AsyncFireObject in LOADED state. When a projection is active (set via
    .select()), plain dictionaries holding only the selected fields are
    returned instead, with DocumentReferences converted to AsyncFireObjects.

    Returns:
        - If no projection: List of AsyncFireObject instances for all
          documents matching the query.
        - If projection active: List of dictionaries containing only the
          selected fields. DocumentReferences are converted to AsyncFireObjects.
        - Empty list if no documents match.

    Example:
        # Get all results as AsyncFireObjects
        users = await query.get()
        for user in users:
            print(f"{user.name}: {user.birth_year}")

        # Get projected results as dictionaries
        users = await query.select('name', 'email').get()
        for user_dict in users:
            print(f"{user_dict['name']}: {user_dict['email']}")

        # Check if results exist
        results = await query.get()
        if results:
            print(f"Found {len(results)} users")
    """
    # Decide the hydration strategy once, before iterating.
    projecting = bool(self._projection)
    collected = []
    async for snapshot in self._query.stream():
        if projecting:
            # Projection: reduce the snapshot to the selected fields only.
            collected.append(self._convert_projection_data(snapshot.to_dict()))
        else:
            collected.append(
                AsyncFireObject.from_snapshot(snapshot, self._parent_collection)
            )
    return collected

limit(count)

Limit the number of results returned.

Creates a new AsyncFireQuery that will return at most count results.

Args: count: Maximum number of documents to return. Must be positive.

Returns: A new AsyncFireQuery instance with the limit applied.

Raises: ValueError: If count is not positive.

Example: # Get top 10 results query = users.order_by('score', direction='DESCENDING').limit(10)

# Get first 5 matching documents
query = users.where('active', '==', True).limit(5)
Source code in src/fire_prox/async_fire_query.py
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
def limit(self, count: int) -> 'AsyncFireQuery':
    """
    Restrict the query to at most `count` results.

    Queries are immutable, so a new AsyncFireQuery is returned.

    Args:
        count: Maximum number of documents to return. Must be positive.

    Returns:
        A new AsyncFireQuery instance with the limit applied.

    Raises:
        ValueError: If count is not positive.

    Example:
        # Get top 10 results
        query = users.order_by('score', direction='DESCENDING').limit(10)

        # Get first 5 matching documents
        query = users.where('active', '==', True).limit(5)
    """
    if count < 1:
        raise ValueError(f"Limit count must be positive, got {count}")

    return AsyncFireQuery(self._query.limit(count), self._parent_collection, self._projection)

on_snapshot(callback)

Listen for real-time updates to this query.

This method sets up a real-time listener that fires the callback whenever any document matching the query changes. The listener runs on a separate thread managed by the Firestore SDK.

Important: This is a sync-only feature. Even for AsyncFireQuery, the listener uses a synchronous query (via the parent collection's _sync_client) to run on a background thread. This is the standard Firestore pattern for real-time listeners in Python.

Args: callback: Callback function invoked on query changes. Signature: callback(query_snapshot, changes, read_time) - query_snapshot: List of DocumentSnapshot objects matching the query - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED) - read_time: Timestamp of the snapshot

Returns: Watch object with an .unsubscribe() method to stop listening.

Example: import threading

callback_done = threading.Event()

def on_change(query_snapshot, changes, read_time):
    for change in changes:
        if change.type.name == 'ADDED':
            print(f"New: {change.document.id}")
        elif change.type.name == 'MODIFIED':
            print(f"Modified: {change.document.id}")
        elif change.type.name == 'REMOVED':
            print(f"Removed: {change.document.id}")
    callback_done.set()

# Listen to active users only (async query)
active_users = users.where('status', '==', 'active')
watch = active_users.on_snapshot(on_change)

# Wait for initial snapshot
callback_done.wait()

# Later: stop listening
watch.unsubscribe()

Note: The callback runs on a separate thread. Use threading primitives (Event, Lock, Queue) for synchronization with your main thread.

Source code in src/fire_prox/async_fire_query.py
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
def on_snapshot(self, callback: Any) -> Any:
    """
    Listen for real-time updates to this query.

    This method sets up a real-time listener that fires the callback
    whenever any document matching the query changes. The listener runs
    on a separate thread managed by the Firestore SDK.

    **Important**: This is a sync-only feature. Even for AsyncFireQuery,
    the listener uses a synchronous query (via the parent collection's
    _sync_client) to run on a background thread. This is the standard
    Firestore pattern for real-time listeners in Python.

    Args:
        callback: Callback function invoked on query changes.
                 Signature: callback(query_snapshot, changes, read_time)
                 - query_snapshot: List of DocumentSnapshot objects matching the query
                 - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED)
                 - read_time: Timestamp of the snapshot

    Returns:
        Watch object with an `.unsubscribe()` method to stop listening.

    Example:
        import threading

        callback_done = threading.Event()

        def on_change(query_snapshot, changes, read_time):
            for change in changes:
                if change.type.name == 'ADDED':
                    print(f"New: {change.document.id}")
                elif change.type.name == 'MODIFIED':
                    print(f"Modified: {change.document.id}")
                elif change.type.name == 'REMOVED':
                    print(f"Removed: {change.document.id}")
            callback_done.set()

        # Listen to active users only (async query)
        active_users = users.where('status', '==', 'active')
        watch = active_users.on_snapshot(on_change)

        # Wait for initial snapshot
        callback_done.wait()

        # Later: stop listening
        watch.unsubscribe()

    Note:
        The callback runs on a separate thread. Use threading primitives
        (Event, Lock, Queue) for synchronization with your main thread.
    """
    # NOTE(review): the docstring above says the listener routes through a
    # synchronous query built from the parent collection's _sync_client,
    # but this line calls on_snapshot directly on self._query (the async
    # query object). Confirm whether the async query actually supports
    # on_snapshot, or whether the sync fallback described in the docstring
    # is still required here.
    # Use the native async query's on_snapshot method
    # The Firestore SDK handles the threading internally
    return self._query.on_snapshot(callback)

order_by(field, direction='ASCENDING')

Add an ordering clause to the query.

Creates a new AsyncFireQuery with ordering by the specified field.

Args: field: The field path to order by. direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.

Returns: A new AsyncFireQuery instance with the ordering applied.

Example: # Ascending order query = users.order_by('birth_year')

# Descending order
query = users.order_by('birth_year', direction='DESCENDING')

# Multiple orderings (chained)
query = (users
         .order_by('country')
         .order_by('birth_year', direction='DESCENDING'))
Source code in src/fire_prox/async_fire_query.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
def order_by(self, field: str, direction: str = 'ASCENDING') -> 'AsyncFireQuery':
    """
    Add an ordering clause to the query.

    Creates a new AsyncFireQuery with ordering by the specified field.
    Follows the immutable builder pattern: the current query is not
    modified.

    Args:
        field: The field path to order by.
        direction: Sort direction, case-insensitive. Either 'ASCENDING'
                  or 'DESCENDING'. Default is 'ASCENDING'.

    Returns:
        A new AsyncFireQuery instance with the ordering applied.

    Raises:
        ValueError: If direction is not 'ASCENDING' or 'DESCENDING'
                    (ignoring case).

    Example:
        # Ascending order
        query = users.order_by('birth_year')

        # Descending order
        query = users.order_by('birth_year', direction='DESCENDING')

        # Multiple orderings (chained)
        query = (users
                 .order_by('country')
                 .order_by('birth_year', direction='DESCENDING'))
    """
    # Validate first so an invalid direction fails fast without touching
    # the Firestore SDK; this also removes the import duplicated in each
    # branch of the previous implementation.
    normalized = direction.upper()
    if normalized not in ('ASCENDING', 'DESCENDING'):
        raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

    # Deferred import mirrors the original behavior; the Query class
    # exposes ASCENDING/DESCENDING constants under the same names we
    # validated above, so resolve the constant by attribute name.
    from google.cloud.firestore_v1 import Query as QueryClass
    direction_const = getattr(QueryClass, normalized)

    new_query = self._query.order_by(field, direction=direction_const)
    return AsyncFireQuery(new_query, self._parent_collection, self._projection)

select(*field_paths)

Select specific fields to return (projection).

Creates a new AsyncFireQuery that only returns the specified fields in the query results. When using projections, query results will be returned as vanilla dictionaries instead of AsyncFireObject instances. Any DocumentReferences in the returned dictionaries will be automatically converted to AsyncFireObject instances in ATTACHED state.

Args: *field_paths: One or more field paths to select. Field paths can include nested fields using dot notation (e.g., 'address.city').

Returns: A new AsyncFireQuery instance with the projection applied.

Raises: ValueError: If no field paths are provided.

Example: # Select a single field query = users.select('name') results = await query.get() # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

# Select multiple fields
query = users.select('name', 'email', 'birth_year')
results = await query.get()
# Returns: [{'name': 'Alice', 'email': 'alice@example.com', 'birth_year': 1990}, ...]

# Select with filtering and ordering
query = (users
         .where('birth_year', '>', 1990)
         .select('name', 'birth_year')
         .order_by('birth_year')
         .limit(10))

# DocumentReferences are auto-converted to AsyncFireObjects
query = posts.select('title', 'author')  # author is a DocumentReference
results = await query.get()
# results[0]['author'] is an AsyncFireObject, not a DocumentReference
await results[0]['author'].fetch()
print(results[0]['author'].name)

Note: - Projection queries return dictionaries, not AsyncFireObject instances - Only the selected fields will be present in the returned dictionaries - DocumentReferences are automatically hydrated to AsyncFireObject instances - Projected results are more bandwidth-efficient for large documents

Source code in src/fire_prox/async_fire_query.py
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
def select(self, *field_paths: str) -> 'AsyncFireQuery':
    """
    Project the query onto a subset of document fields.

    Returns a new AsyncFireQuery that fetches only the named fields.
    Projected results come back as plain dictionaries rather than
    AsyncFireObject instances; any DocumentReference values inside those
    dictionaries are automatically wrapped as AsyncFireObjects in
    ATTACHED state.

    Args:
        *field_paths: One or more field paths to select. Dot notation is
                     supported for nested fields (e.g., 'address.city').

    Returns:
        A new AsyncFireQuery instance with the projection applied.

    Raises:
        ValueError: If called with no field paths.

    Example:
        # Select a single field
        query = users.select('name')
        results = await query.get()
        # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

        # Select multiple fields
        query = users.select('name', 'email', 'birth_year')

        # Combine with filtering and ordering
        query = (users
                 .where('birth_year', '>', 1990)
                 .select('name', 'birth_year')
                 .order_by('birth_year')
                 .limit(10))

        # DocumentReferences are auto-converted to AsyncFireObjects
        query = posts.select('title', 'author')  # author is a DocumentReference
        results = await query.get()
        await results[0]['author'].fetch()
        print(results[0]['author'].name)

    Note:
        - Projection queries yield dictionaries, not AsyncFireObject instances
        - Only the selected fields are present in each dictionary
        - DocumentReferences are hydrated to AsyncFireObject instances
        - Projections save bandwidth on large documents
    """
    if not field_paths:
        raise ValueError("select() requires at least one field path")

    # Record the projection on the wrapper so result conversion knows to
    # emit dictionaries instead of proxies.
    projected = self._query.select(list(field_paths))
    return AsyncFireQuery(projected, self._parent_collection, projection=field_paths)

start_after(*document_fields_or_snapshot)

Start query results after a cursor position (exclusive).

Creates a new AsyncFireQuery that starts after the specified cursor. The cursor document itself is excluded from results. This is typically used for pagination to avoid duplicating the last document from the previous page.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the start-after cursor applied.

Example: # Pagination: exclude the last document from previous page page1 = await users.order_by('age').limit(10).get() last_age = page1[-1].age page2 = await users.order_by('age').start_after({'age': last_age}).limit(10).get()

# Using a document snapshot (common pattern)
last_doc_ref = page1[-1]._doc_ref
last_snapshot = await last_doc_ref.get()
page2 = await users.order_by('age').start_after(last_snapshot).limit(10).get()
Source code in src/fire_prox/async_fire_query.py
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
def start_after(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Begin results just past a cursor position (exclusive).

    The cursor document itself is not included in the results, which
    makes this the natural primitive for pagination: pass the last item
    of the previous page and the next page starts right after it.

    Args:
        *document_fields_or_snapshot: One of:
            - A dictionary of field values: {'field': value}
            - A DocumentSnapshot from a previous query
            - Positional field values matching the order_by clause

    Returns:
        A new AsyncFireQuery instance with the start-after cursor applied.

    Example:
        # Pagination: exclude the last document from previous page
        page1 = await users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = await users.order_by('age').start_after({'age': last_age}).limit(10).get()

        # Using a document snapshot (common pattern)
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = await last_doc_ref.get()
        page2 = await users.order_by('age').start_after(last_snapshot).limit(10).get()
    """
    cursor_query = self._query.start_after(*document_fields_or_snapshot)
    return AsyncFireQuery(cursor_query, self._parent_collection, self._projection)

start_at(*document_fields_or_snapshot)

Start query results at a cursor position (inclusive).

Creates a new AsyncFireQuery that starts at the specified cursor. The cursor can be a document snapshot or a dictionary of field values matching the order_by fields.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the start cursor applied.

Example: # Using field values (requires matching order_by) query = users.order_by('age').start_at({'age': 25})

# Pagination: get first page, then start at last document
page1 = await users.order_by('age').limit(10).get()
last_age = page1[-1].age
page2 = await users.order_by('age').start_at({'age': last_age}).limit(10).get()

# Using a document snapshot
last_doc_ref = page1[-1]._doc_ref
last_snapshot = await last_doc_ref.get()
page2 = await users.order_by('age').start_at(last_snapshot).limit(10).get()
Source code in src/fire_prox/async_fire_query.py
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
def start_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Begin results at a cursor position (inclusive).

    The cursor document itself is included in the results. The cursor may
    be a document snapshot or field values matching the query's order_by
    clause.

    Args:
        *document_fields_or_snapshot: One of:
            - A dictionary of field values: {'field': value}
            - A DocumentSnapshot from a previous query
            - Positional field values matching the order_by clause

    Returns:
        A new AsyncFireQuery instance with the start cursor applied.

    Example:
        # Using field values (requires matching order_by)
        query = users.order_by('age').start_at({'age': 25})

        # Pagination: get first page, then start at last document
        page1 = await users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = await users.order_by('age').start_at({'age': last_age}).limit(10).get()

        # Using a document snapshot
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = await last_doc_ref.get()
        page2 = await users.order_by('age').start_at(last_snapshot).limit(10).get()
    """
    cursor_query = self._query.start_at(*document_fields_or_snapshot)
    return AsyncFireQuery(cursor_query, self._parent_collection, self._projection)

stream() async

Execute the query and stream results as an async iterator.

Returns an async generator that yields AsyncFireObject instances one at a time. This is more memory-efficient than .get() for large result sets as it doesn't load all results into memory at once. If a projection is active (via .select()), yields vanilla dictionaries instead.

Yields: - If no projection: AsyncFireObject instances in LOADED state for each matching document. - If projection active: Dictionaries containing only the selected fields. DocumentReferences are converted to AsyncFireObjects.

Example: # Stream results one at a time as AsyncFireObjects async for user in query.stream(): print(f"{user.name}: {user.birth_year}") # Process each user without loading all users into memory

# Stream projected results as dictionaries
async for user_dict in query.select('name', 'email').stream():
    print(f"{user_dict['name']}: {user_dict['email']}")

# Works with any query
async for post in (posts
                  .where('published', '==', True)
                  .order_by('date', direction='DESCENDING')
                  .stream()):
    print(post.title)
Source code in src/fire_prox/async_fire_query.py
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
async def stream(self) -> Union[AsyncIterator[AsyncFireObject], AsyncIterator[Dict[str, Any]]]:
    """
    Execute the query, yielding results one at a time.

    Memory-friendly alternative to .get(): results are produced lazily by
    an async generator instead of being collected into a list. When a
    projection is active (set via .select()), plain dictionaries are
    yielded; otherwise each result is an AsyncFireObject in LOADED state.

    Yields:
        - No projection: AsyncFireObject instances in LOADED state, one
          per matching document.
        - Projection active: dictionaries limited to the selected fields,
          with DocumentReferences converted to AsyncFireObjects.

    Example:
        # Stream results one at a time as AsyncFireObjects
        async for user in query.stream():
            print(f"{user.name}: {user.birth_year}")
            # Process each user without loading all users into memory

        # Stream projected results as dictionaries
        async for user_dict in query.select('name', 'email').stream():
            print(f"{user_dict['name']}: {user_dict['email']}")

        # Works with any query
        async for post in (posts
                          .where('published', '==', True)
                          .order_by('date', direction='DESCENDING')
                          .stream()):
            print(post.title)
    """
    if self._projection:
        # Projection active: emit plain dicts, hydrating any
        # DocumentReference values into AsyncFireObjects.
        async for snapshot in self._query.stream():
            yield self._convert_projection_data(snapshot.to_dict())
    else:
        # No projection: wrap each snapshot in a LOADED proxy.
        async for snapshot in self._query.stream():
            yield AsyncFireObject.from_snapshot(snapshot, self._parent_collection)

sum(field) async

Sum a numeric field across all matching documents.

Executes an aggregation query to sum the values of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to sum.

Returns: Sum of the field values across all matching documents. Returns 0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Sum all salaries total_salary = await employees.sum('salary') # Returns: 5000000

# Sum with filters
engineering_salary = await (employees
                           .where('department', '==', 'Engineering')
                           .sum('salary'))
# Returns: 2500000

# Sum revenue from active products
total_revenue = await (products
                      .where('active', '==', True)
                      .sum('revenue'))
# Returns: 1250000.50

Note: - Null values are ignored in the sum - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/async_fire_query.py
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
async def sum(self, field: str) -> Union[int, float]:
    """
    Sum a numeric field across all matching documents.

    Executes an aggregation query to sum the values of a specific field
    without fetching the actual documents. The field must contain numeric
    values (int or float).

    Args:
        field: Name of the numeric field to sum.

    Returns:
        Sum of the field values across all matching documents.
        Returns 0 if no documents match or if all values are null.

    Raises:
        ValueError: If field is None or empty.

    Example:
        # Sum all salaries
        total_salary = await employees.sum('salary')
        # Returns: 5000000

        # Sum with filters
        engineering_salary = await (employees
                                   .where('department', '==', 'Engineering')
                                   .sum('salary'))
        # Returns: 2500000

        # Sum revenue from active products
        total_revenue = await (products
                              .where('active', '==', True)
                              .sum('revenue'))
        # Returns: 1250000.50

    Note:
        - Null values are ignored in the sum
        - Non-numeric values will cause an error
        - This is more efficient than fetching all documents
    """
    if not field:
        raise ValueError("sum() requires a field name")

    # Create async aggregation query using AsyncQuery's sum method
    agg_query = self._query.sum(field, alias='sum')

    # Execute and extract result (await the async get method)
    result = await agg_query.get()
    if result and len(result) > 0:
        # Extract sum from first (and only) aggregation result
        for agg_result in result:
            return agg_result[0].value
    return 0

where(field, op, value)

Add a filter condition to the query.

Creates a new AsyncFireQuery with an additional filter condition. Uses the immutable pattern - returns a new instance rather than modifying the current query.

Args: field: The field path to filter on (e.g., 'name', 'address.city'). op: Comparison operator. Supported operators: '==' (equal), '!=' (not equal), '<' (less than), '<=' (less than or equal), '>' (greater than), '>=' (greater than or equal), 'in' (value in list), 'not-in' (value not in list), 'array-contains' (array contains value), 'array-contains-any' (array contains any of the values). value: The value to compare against.

Returns: A new AsyncFireQuery instance with the added filter.

Example: # Single condition query = users.where('birth_year', '>', 1800)

# Multiple conditions (chained)
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England'))
Source code in src/fire_prox/async_fire_query.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
    """
    Attach a filter condition, returning a new query.

    Immutable builder pattern: the current AsyncFireQuery is left
    unchanged and a new instance carrying the extra condition is
    returned, so calls can be chained freely.

    Args:
        field: The field path to filter on (e.g., 'name', 'address.city').
        op: Comparison operator. Supported operators:
            '==' (equal), '!=' (not equal),
            '<' (less than), '<=' (less than or equal),
            '>' (greater than), '>=' (greater than or equal),
            'in' (value in list), 'not-in' (value not in list),
            'array-contains' (array contains value),
            'array-contains-any' (array contains any of the values).
        value: The value to compare against.

    Returns:
        A new AsyncFireQuery instance with the added filter.

    Example:
        # Single condition
        query = users.where('birth_year', '>', 1800)

        # Multiple conditions (chained)
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England'))
    """
    # Wrap the condition in a FieldFilter (the non-deprecated filter API)
    # and hand it to the native query.
    condition = FieldFilter(field, op, value)
    filtered = self._query.where(filter=condition)
    return AsyncFireQuery(filtered, self._parent_collection, self._projection)

Avg

Bases: AggregationType

Average aggregation - averages a numeric field across documents.

Requires a field name. The field must contain numeric values (int or float). Returns the arithmetic mean of all non-null values.

Example: # Average age avg_age = users.avg('age')

# Average via aggregate()
result = users.aggregate(avg_rating=Avg('rating'))
# Returns: {'avg_rating': 4.2}

Args: field: Name of the numeric field to average.

Raises: ValueError: If field is not provided.

Source code in src/fire_prox/aggregation.py
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
class Avg(AggregationType):
    """
    Average aggregation over a numeric field.

    Computes the arithmetic mean of all non-null values of the given
    field across the matching documents. The field must hold numeric
    values (int or float).

    Example:
        # Average age
        avg_age = users.avg('age')

        # Average via aggregate()
        result = users.aggregate(avg_rating=Avg('rating'))
        # Returns: {'avg_rating': 4.2}

    Args:
        field: Name of the numeric field to average.

    Raises:
        ValueError: If field is not provided.
    """

    def __init__(self, field: str):
        """
        Initialize Avg aggregation.

        Args:
            field: Name of the numeric field to average. Must be non-empty.

        Raises:
            ValueError: If field is None or empty.
        """
        if field:
            super().__init__(field=field)
        else:
            raise ValueError("Avg aggregation requires a field name")

__init__(field)

Initialize Avg aggregation.

Args: field: Name of the numeric field to average.

Raises: ValueError: If field is None or empty.

Source code in src/fire_prox/aggregation.py
122
123
124
125
126
127
128
129
130
131
132
133
134
def __init__(self, field: str):
    """
    Initialize Avg aggregation.

    Args:
        field: Name of the numeric field to average. Must be non-empty.

    Raises:
        ValueError: If field is None or empty.
    """
    if field:
        super().__init__(field=field)
    else:
        raise ValueError("Avg aggregation requires a field name")

Count

Bases: AggregationType

Count aggregation - counts matching documents.

Does not require a field name since it counts documents, not field values.

Example: # Count all active users count = users.where('active', '==', True).count()

# Count via aggregate()
result = users.aggregate(total_users=Count())
# Returns: {'total_users': 42}
Source code in src/fire_prox/aggregation.py
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
class Count(AggregationType):
    """
    Count aggregation - counts matching documents.

    No field name is required: the aggregation counts whole documents
    rather than the values of any particular field.

    Example:
        # Count all active users
        count = users.where('active', '==', True).count()

        # Count via aggregate()
        result = users.aggregate(total_users=Count())
        # Returns: {'total_users': 42}
    """

    def __init__(self):
        """Initialize Count aggregation (no field needed)."""
        # Counting operates on documents, so no field is passed down.
        super().__init__(field=None)

__init__()

Initialize Count aggregation (no field needed).

Source code in src/fire_prox/aggregation.py
59
60
61
def __init__(self):
    """Initialize Count aggregation (no field needed)."""
    # Count operates on whole documents, so the field is explicitly None.
    super().__init__(field=None)

FireCollection

Bases: BaseFireCollection

A wrapper around Firestore CollectionReference for document management (synchronous).

FireCollection provides a simplified interface for creating new documents and querying collections. It serves as a factory for FireObject instances and (in Phase 2) will provide a lightweight query builder.

This is the synchronous implementation.

Usage Examples: # Get a collection users = db.collection('users')

# Create a new document in DETACHED state
new_user = users.new()
new_user.name = 'Ada Lovelace'
new_user.year = 1815
new_user.save()

# Create with explicit ID
user = users.new()
user.name = 'Charles Babbage'
user.save(doc_id='cbabbage')

# Phase 2: Query the collection
query = users.where('year', '>', 1800).limit(10)
for user in query.get():
    print(user.name)
Source code in src/fire_prox/fire_collection.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
class FireCollection(BaseFireCollection):
    """
    A wrapper around Firestore CollectionReference for document management (synchronous).

    FireCollection provides a simplified interface for creating new documents
    and querying collections. It serves as a factory for FireObject instances
    and (in Phase 2) will provide a lightweight query builder.

    This is the synchronous implementation.

    Usage Examples:
        # Get a collection
        users = db.collection('users')

        # Create a new document in DETACHED state
        new_user = users.new()
        new_user.name = 'Ada Lovelace'
        new_user.year = 1815
        new_user.save()

        # Create with explicit ID
        user = users.new()
        user.name = 'Charles Babbage'
        user.save(doc_id='cbabbage')

        # Phase 2: Query the collection
        query = users.where('year', '>', 1800).limit(10)
        for user in query.get():
            print(user.name)
    """

    # =========================================================================
    # Document Creation
    # =========================================================================

    def _instantiate_object(
        self,
        *,
        doc_ref: Any,
        initial_state: State,
        parent_collection: 'FireCollection',
        **_: Any,
    ) -> FireObject:
        """Instantiate the synchronous FireObject wrapper."""
        return FireObject(
            doc_ref=doc_ref,
            initial_state=initial_state,
            parent_collection=parent_collection,
        )

    def new(self) -> FireObject:
        """Create a new FireObject in DETACHED state."""
        return super().new()

    def doc(self, doc_id: str) -> FireObject:
        """Get a reference to a specific document in this collection."""
        return super().doc(doc_id)

    # =========================================================================
    # Parent Property (Phase 2)
    # =========================================================================

    @property
    def parent(self) -> Optional[FireObject]:
        """
        Get the parent document if this is a subcollection.

        Returns:
            FireObject representing the parent document if this is a
            subcollection, None if this is a root-level collection.

        Raises:
            NotImplementedError: Always, in Phase 1. Subcollection support
                is a Phase 2 feature; until it lands this property is
                unusable rather than silently returning None.

        Example:
            posts = db.doc('users/alovelace').collection('posts')
            parent = posts.parent
            print(parent.path)  # 'users/alovelace'
        """
        raise NotImplementedError("Phase 2 feature - subcollections")

    # =========================================================================
    # Query Methods (Phase 2)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'FireQuery':
        """
        Create a query with a filter condition.

        Phase 2.5 feature. Builds a lightweight query for common filtering
        needs. For complex queries, users should use the native API and
        hydrate results with FireObject.from_snapshot().

        Args:
            field: The field path to filter on (e.g., 'name', 'address.city').
            op: Comparison operator: '==', '!=', '<', '<=', '>', '>=',
                'in', 'not-in', 'array-contains', 'array-contains-any'.
            value: The value to compare against.

        Returns:
            A FireQuery instance for method chaining.

        Example:
            query = users.where('birth_year', '>', 1800)
                        .where('country', '==', 'UK')
                        .limit(10)
            for user in query.get():
                print(user.name)
        """
        from google.cloud.firestore_v1.base_query import FieldFilter

        from .fire_query import FireQuery

        # FieldFilter is the non-deprecated keyword form of where() in the
        # native client (positional filters emit a UserWarning).
        filter_obj = FieldFilter(field, op, value)
        native_query = self._collection_ref.where(filter=filter_obj)
        return FireQuery(native_query, parent_collection=self)

    def order_by(
        self,
        field: str,
        direction: str = 'ASCENDING'
    ) -> 'FireQuery':
        """
        Create a query with ordering.

        Phase 2.5 feature. Orders results by a field.

        Args:
            field: The field path to order by.
            direction: 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.
                Case-insensitive.

        Returns:
            A FireQuery instance for method chaining.

        Raises:
            ValueError: If direction is not a recognized value.
        """
        from google.cloud.firestore_v1 import Query as QueryClass

        from .fire_query import FireQuery

        # Convert the (case-insensitive) direction string to the native constant.
        if direction.upper() == 'ASCENDING':
            direction_const = QueryClass.ASCENDING
        elif direction.upper() == 'DESCENDING':
            direction_const = QueryClass.DESCENDING
        else:
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        native_query = self._collection_ref.order_by(field, direction=direction_const)
        return FireQuery(native_query, parent_collection=self)

    def limit(self, count: int) -> 'FireQuery':
        """
        Create a query with a result limit.

        Phase 2.5 feature. Limits the number of results returned.

        Args:
            count: Maximum number of results to return. Must be positive.

        Returns:
            A FireQuery instance for method chaining.

        Raises:
            ValueError: If count is not positive.
        """
        from .fire_query import FireQuery

        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")

        native_query = self._collection_ref.limit(count)
        return FireQuery(native_query, parent_collection=self)

    def select(self, *field_paths: str) -> 'FireQuery':
        """
        Create a query with field projection.

        Phase 4 Part 3 feature. Selects specific fields to return in query results.
        Returns vanilla dictionaries instead of FireObject instances.

        Args:
            *field_paths: One or more field paths to select.

        Returns:
            A FireQuery instance with projection applied.

        Raises:
            ValueError: If no field paths are given.

        Example:
            # Select specific fields
            results = users.select('name', 'email').get()
            # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
        """
        from .fire_query import FireQuery

        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # The native select() expects a list; the projection tuple is kept so
        # FireQuery knows to emit plain dicts instead of FireObjects.
        native_query = self._collection_ref.select(list(field_paths))
        return FireQuery(native_query, parent_collection=self, projection=field_paths)

    def get_all(self) -> Iterator[FireObject]:
        """
        Retrieve all documents in the collection.

        Phase 2.5 feature. Returns an iterator of all documents.

        Yields:
            FireObject instances in LOADED state for each document.

        Example:
            for user in users.get_all():
                print(f"{user.name}: {user.year}")
        """
        # stream() pages through the collection lazily; each snapshot is
        # hydrated as it arrives rather than materializing the whole set.
        for snapshot in self._collection_ref.stream():
            yield FireObject.from_snapshot(snapshot, parent_collection=self)

    # =========================================================================
    # Vector Query Methods
    # =========================================================================

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'FireQuery':
        """
        Find the nearest neighbors based on vector similarity.

        Performs a vector similarity search to find documents with embeddings
        nearest to the query vector. Requires a single-field vector index on
        the vector_field.

        Args:
            vector_field: Name of the field containing vector embeddings.
            query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
            distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
                DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
            limit: Maximum number of nearest neighbors to return (max 1000).
            distance_result_field: Optional field name to store the calculated distance
                in the query results.

        Returns:
            A FireQuery instance for method chaining and execution.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            collection = db.collection("documents")
            query = collection.find_nearest(
                vector_field="embedding",
                query_vector=Vector([0.1, 0.2, 0.3]),
                distance_measure=DistanceMeasure.EUCLIDEAN,
                limit=5
            )
            for doc in query.get():
                print(f"{doc.title}: {doc.embedding}")

        Note:
            - Requires a vector index on the vector_field
            - Maximum limit is 1000 documents
            - Can be combined with where() for pre-filtering (requires composite index)
            - Does not work with Firestore emulator (production only)
        """
        from .fire_query import FireQuery

        # Delegate to the native find_nearest; server-side limits (e.g. the
        # 1000-result cap) are enforced by Firestore itself.
        native_query = self._collection_ref.find_nearest(
            vector_field=vector_field,
            query_vector=query_vector,
            distance_measure=distance_measure,
            limit=limit,
            distance_result_field=distance_result_field,
        )
        return FireQuery(native_query, parent_collection=self)

    # =========================================================================
    # Aggregation Methods (Phase 4 Part 5)
    # =========================================================================

    def _as_query(self) -> 'FireQuery':
        """Wrap the bare collection reference in a FireQuery.

        Aggregations operate on queries; a collection reference is itself a
        valid (unfiltered) query, so this shared helper avoids repeating the
        wrapping boilerplate in count()/sum()/avg()/aggregate().
        """
        from .fire_query import FireQuery
        return FireQuery(self._collection_ref, parent_collection=self)

    def count(self) -> int:
        """
        Count documents in the collection.

        Phase 4 Part 5 feature. Returns the total count of documents
        without fetching their data.

        Returns:
            The number of documents in the collection.

        Example:
            total = users.count()
            print(f"Total users: {total}")
        """
        return self._as_query().count()

    def sum(self, field: str):
        """
        Sum a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the sum of a numeric field
        without fetching document data.

        Args:
            field: The field name to sum.

        Returns:
            The sum of the field values (int or float).

        Example:
            total_revenue = orders.sum('amount')
        """
        return self._as_query().sum(field)

    def avg(self, field: str) -> float:
        """
        Average a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the average of a numeric field
        without fetching document data.

        Args:
            field: The field name to average.

        Returns:
            The average of the field values (float).

        Example:
            avg_rating = products.avg('rating')
        """
        return self._as_query().avg(field)

    def aggregate(self, **aggregations):
        """
        Execute multiple aggregations in a single query.

        Phase 4 Part 5 feature. Performs multiple aggregation operations
        (count, sum, avg) in one efficient query.

        Args:
            **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

        Returns:
            Dictionary mapping aggregation names to their results.

        Example:
            from fire_prox import Count, Sum, Avg

            stats = users.aggregate(
                total=Count(),
                total_score=Sum('score'),
                avg_age=Avg('age')
            )
            # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
        """
        return self._as_query().aggregate(**aggregations)

    # =========================================================================
    # Collection Deletion
    # =========================================================================

    def delete_all(
        self,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete every document in this collection.

        Firestore offers no atomic "drop collection" operation. This helper
        iterates through each document and issues batched deletes. When
        recursive is True (default) it will also clear any nested subcollections
        before deleting their parent document.

        Args:
            batch_size: Maximum number of deletes to commit at once.
            recursive: Whether to delete nested subcollections.
            dry_run: Count what would be removed without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections
            visited during recursion.

        Raises:
            ValueError: If batch_size is not positive.
        """
        self._validate_batch_size(batch_size)

        # include_self=False: this collection is the root of the purge and is
        # not counted among the subcollections visited.
        return self._delete_collection_recursive(
            collection_ref=self._collection_ref,
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
            include_self=False,
        )

    def _delete_collection_recursive(
        self,
        *,
        collection_ref: Any,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
        include_self: bool,
    ) -> Dict[str, int]:
        """Internal helper to delete documents within a collection reference.

        Subcollections of each document are cleared first (when recursive)
        so no orphaned subtree survives the parent's deletion. In dry_run
        mode no batch is created and only the counters advance.
        """
        # NOTE(review): reaches into the native reference's private _client;
        # no public accessor for the owning client is used here.
        client = collection_ref._client
        stats = {'documents': 0, 'collections': 1 if include_self else 0}
        batch = None if dry_run else client.batch()
        ops_in_batch = 0

        for doc_ref in collection_ref.list_documents(page_size=batch_size):
            if recursive:
                sub_stats = self._delete_document_subcollections(
                    doc_ref,
                    batch_size=batch_size,
                    recursive=recursive,
                    dry_run=dry_run,
                )
                stats['documents'] += sub_stats['documents']
                stats['collections'] += sub_stats['collections']

            if not dry_run and batch is not None:
                batch.delete(doc_ref)
                ops_in_batch += 1

            stats['documents'] += 1

            # Flush once the batch reaches capacity to respect Firestore's
            # per-commit operation limits.
            if not dry_run and batch is not None and ops_in_batch >= batch_size:
                batch.commit()
                batch = client.batch()
                ops_in_batch = 0

        # Commit any remaining deletes in the final, partially-filled batch.
        if not dry_run and batch is not None and ops_in_batch:
            batch.commit()

        return stats

    def _delete_document_subcollections(
        self,
        doc_ref: Any,
        *,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
    ) -> Dict[str, int]:
        """Delete all subcollections hanging off a document reference."""
        stats = {'documents': 0, 'collections': 0}

        for subcollection_ref in doc_ref.collections():
            sub_stats = self._delete_collection_recursive(
                collection_ref=subcollection_ref,
                batch_size=batch_size,
                recursive=recursive,
                dry_run=dry_run,
                include_self=True,
            )
            stats['documents'] += sub_stats['documents']
            stats['collections'] += sub_stats['collections']

        return stats

parent property

Get the parent document if this is a subcollection.

Returns: FireObject representing the parent document if this is a subcollection, None if this is a root-level collection.

Note: Phase 2 feature. Raises NotImplementedError in Phase 1 as subcollections are not yet implemented.

Example: posts = db.doc('users/alovelace').collection('posts') parent = posts.parent print(parent.path) # 'users/alovelace'

aggregate(**aggregations)

Execute multiple aggregations in a single query.

Phase 4 Part 5 feature. Performs multiple aggregation operations (count, sum, avg) in one efficient query.

Args: **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

Returns: Dictionary mapping aggregation names to their results.

Example: from fire_prox import Count, Sum, Avg

stats = users.aggregate(
    total=Count(),
    total_score=Sum('score'),
    avg_age=Avg('age')
)
# Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
Source code in src/fire_prox/fire_collection.py
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
def aggregate(self, **aggregations):
    """
    Run several aggregation operations in a single query.

    Phase 4 Part 5 feature. Combines count/sum/avg aggregations into one
    efficient server-side round trip.

    Args:
        **aggregations: Named aggregations built with Count(), Sum(), or Avg().

    Returns:
        Dictionary mapping each aggregation name to its computed value.

    Example:
        from fire_prox import Count, Sum, Avg

        stats = users.aggregate(
            total=Count(),
            total_score=Sum('score'),
            avg_age=Avg('age')
        )
        # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
    """
    from .fire_query import FireQuery

    # A collection reference is itself a valid (unfiltered) query, so wrap
    # it directly and let FireQuery drive the aggregation machinery.
    wrapper = FireQuery(self._collection_ref, parent_collection=self)
    return wrapper.aggregate(**aggregations)

avg(field)

Average a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the average of a numeric field without fetching document data.

Args: field: The field name to average.

Returns: The average of the field values (float).

Example: avg_rating = products.avg('rating')

Source code in src/fire_prox/fire_collection.py
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
def avg(self, field: str) -> float:
    """
    Compute the mean of a numeric field over every document.

    Phase 4 Part 5 feature. The average is calculated server-side, so no
    document payloads are downloaded.

    Args:
        field: The field name to average.

    Returns:
        The average of the field values (float).

    Example:
        avg_rating = products.avg('rating')
    """
    from .fire_query import FireQuery

    # Treat the bare collection reference as an unfiltered query.
    proxy_query = FireQuery(self._collection_ref, parent_collection=self)
    return proxy_query.avg(field)

count()

Count documents in the collection.

Phase 4 Part 5 feature. Returns the total count of documents without fetching their data.

Returns: The number of documents in the collection.

Example: total = users.count() print(f"Total users: {total}")

Source code in src/fire_prox/fire_collection.py
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
def count(self) -> int:
    """
    Return the number of documents in the collection.

    Phase 4 Part 5 feature. Uses a server-side count aggregation, so no
    document data is transferred.

    Returns:
        The number of documents in the collection.

    Example:
        total = users.count()
        print(f"Total users: {total}")
    """
    from .fire_query import FireQuery

    # A collection reference doubles as an unfiltered query for aggregation.
    proxy_query = FireQuery(self._collection_ref, parent_collection=self)
    return proxy_query.count()

delete_all(*, batch_size=50, recursive=True, dry_run=False)

Delete every document in this collection.

Firestore offers no atomic "drop collection" operation. This helper iterates through each document and issues batched deletes. When recursive is True (default) it will also clear any nested subcollections before deleting their parent document.

Args: batch_size: Maximum number of deletes to commit at once. recursive: Whether to delete nested subcollections. dry_run: Count what would be removed without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections visited during recursion.

Raises: ValueError: If batch_size is not positive.

Source code in src/fire_prox/fire_collection.py
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
def delete_all(
    self,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Remove every document from this collection.

    Firestore has no atomic "drop collection" primitive, so this walks the
    collection and issues batched deletes. With recursive=True (the
    default) nested subcollections are emptied before their parent
    document is removed.

    Args:
        batch_size: Maximum number of deletes committed per batch.
        recursive: Whether nested subcollections are cleared too.
        dry_run: Only count what would be removed; perform no writes.

    Returns:
        Dictionary with counts of documents deleted and subcollections
        visited during recursion.

    Raises:
        ValueError: If batch_size is not positive.
    """
    self._validate_batch_size(batch_size)

    # include_self=False: this collection is the root of the purge, so it
    # is not tallied among the subcollections visited.
    return self._delete_collection_recursive(
        collection_ref=self._collection_ref,
        batch_size=batch_size,
        recursive=recursive,
        dry_run=dry_run,
        include_self=False,
    )

doc(doc_id)

Get a reference to a specific document in this collection.

Source code in src/fire_prox/fire_collection.py
73
74
75
def doc(self, doc_id: str) -> FireObject:
    """Get a reference to a specific document in this collection.

    Args:
        doc_id: Identifier of the document within this collection.

    Returns:
        A FireObject for the referenced document, as produced by the shared
        base-class implementation this method delegates to.
    """
    return super().doc(doc_id)

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search to find documents with embeddings nearest to the query vector. Requires a single-field vector index on the vector_field.

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: A FireQuery instance for method chaining and execution.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

collection = db.collection("documents")
query = collection.find_nearest(
    vector_field="embedding",
    query_vector=Vector([0.1, 0.2, 0.3]),
    distance_measure=DistanceMeasure.EUCLIDEAN,
    limit=5
)
for doc in query.get():
    print(f"{doc.title}: {doc.embedding}")

Note: - Requires a vector index on the vector_field - Maximum limit is 1000 documents - Can be combined with where() for pre-filtering (requires composite index) - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/fire_collection.py
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'FireQuery':
    """
    Run a vector similarity search over the collection.

    Returns the documents whose embeddings stored in vector_field are
    closest to query_vector. A single-field vector index on vector_field
    is required.

    Args:
        vector_field: Name of the field containing vector embeddings.
        query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
        distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
            DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
        limit: Maximum number of nearest neighbors to return (max 1000).
        distance_result_field: Optional field name to store the calculated
            distance in the query results.

    Returns:
        A FireQuery instance for method chaining and execution.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        collection = db.collection("documents")
        query = collection.find_nearest(
            vector_field="embedding",
            query_vector=Vector([0.1, 0.2, 0.3]),
            distance_measure=DistanceMeasure.EUCLIDEAN,
            limit=5
        )
        for doc in query.get():
            print(f"{doc.title}: {doc.embedding}")

    Note:
        - Requires a vector index on the vector_field
        - Maximum limit is 1000 documents
        - Can be combined with where() for pre-filtering (requires composite index)
        - Does not work with Firestore emulator (production only)
    """
    from .fire_query import FireQuery

    # All parameters pass straight through to the native vector query.
    nearest = self._collection_ref.find_nearest(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return FireQuery(nearest, parent_collection=self)

get_all()

Retrieve all documents in the collection.

Phase 2.5 feature. Returns an iterator of all documents.

Yields: FireObject instances in LOADED state for each document.

Example: for user in users.get_all(): print(f"{user.name}: {user.year}")

Source code in src/fire_prox/fire_collection.py
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
def get_all(self) -> Iterator[FireObject]:
    """
    Iterate over every document in the collection.

    Phase 2.5 feature. Streams the collection lazily rather than loading
    it all at once.

    Yields:
        FireObject instances in LOADED state, one per document.

    Example:
        for user in users.get_all():
            print(f"{user.name}: {user.year}")
    """
    # stream() pages through results; hydrate each snapshot as it arrives.
    snapshots = self._collection_ref.stream()
    for snap in snapshots:
        yield FireObject.from_snapshot(snap, parent_collection=self)

limit(count)

Create a query with a result limit.

Phase 2.5 feature. Limits the number of results returned.

Args: count: Maximum number of results to return.

Returns: A FireQuery instance for method chaining.

Source code in src/fire_prox/fire_collection.py
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
def limit(self, count: int) -> 'FireQuery':
    """
    Build a query capped at a maximum number of results.

    Phase 2.5 feature. Restricts how many documents the query may return.

    Args:
        count: Maximum number of results to return; must be positive.

    Returns:
        A FireQuery instance for method chaining.

    Raises:
        ValueError: If count is not positive.
    """
    from .fire_query import FireQuery

    # Reject non-positive limits up front rather than at execution time.
    if count <= 0:
        raise ValueError(f"Limit count must be positive, got {count}")

    limited = self._collection_ref.limit(count)
    return FireQuery(limited, parent_collection=self)

new()

Create a new FireObject in DETACHED state.

Source code in src/fire_prox/fire_collection.py
69
70
71
def new(self) -> FireObject:
    """Create a new FireObject in DETACHED state.

    Returns:
        A fresh FireObject with no Firestore document behind it yet,
        produced by the shared base-class implementation this method
        delegates to; it is persisted on its first save().
    """
    return super().new()

order_by(field, direction='ASCENDING')

Create a query with ordering.

Phase 2.5 feature. Orders results by a field.

Args: field: The field path to order by. direction: 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.

Returns: A FireQuery instance for method chaining.

Source code in src/fire_prox/fire_collection.py
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
def order_by(
    self,
    field: str,
    direction: str = 'ASCENDING'
) -> 'FireQuery':
    """
    Build a query sorted by a field.

    Phase 2.5 feature. Orders results by the given field path.

    Args:
        field: The field path to order by.
        direction: 'ASCENDING' or 'DESCENDING' (case-insensitive).
            Default is 'ASCENDING'.

    Returns:
        A FireQuery instance for method chaining.

    Raises:
        ValueError: If direction is not a recognized value.
    """
    from google.cloud.firestore_v1 import Query as QueryClass

    from .fire_query import FireQuery

    # Map the case-insensitive direction string onto the native constant.
    try:
        direction_const = {
            'ASCENDING': QueryClass.ASCENDING,
            'DESCENDING': QueryClass.DESCENDING,
        }[direction.upper()]
    except KeyError:
        raise ValueError(
            f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'"
        ) from None

    ordered = self._collection_ref.order_by(field, direction=direction_const)
    return FireQuery(ordered, parent_collection=self)

select(*field_paths)

Create a query with field projection.

Phase 4 Part 3 feature. Selects specific fields to return in query results. Returns vanilla dictionaries instead of FireObject instances.

Args: *field_paths: One or more field paths to select.

Returns: A FireQuery instance with projection applied.

Example: # Select specific fields results = users.select('name', 'email').get() # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]

Source code in src/fire_prox/fire_collection.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
def select(self, *field_paths: str) -> 'FireQuery':
    """
    Build a query that projects only the named fields.

    Phase 4 Part 3 feature. Query results carry just the selected fields
    and come back as plain dictionaries rather than FireObject instances.

    Args:
        *field_paths: One or more field paths to select.

    Returns:
        A FireQuery instance with projection applied.

    Raises:
        ValueError: If no field paths are given.

    Example:
        # Select specific fields
        results = users.select('name', 'email').get()
        # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
    """
    from .fire_query import FireQuery

    if not field_paths:
        raise ValueError("select() requires at least one field path")

    # The native select() wants a list; the projection tuple tells FireQuery
    # to yield plain dicts instead of FireObjects.
    projected = self._collection_ref.select(list(field_paths))
    return FireQuery(projected, parent_collection=self, projection=field_paths)

sum(field)

Sum a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the sum of a numeric field without fetching document data.

Args: field: The field name to sum.

Returns: The sum of the field values (int or float).

Example: total_revenue = orders.sum('amount')

Source code in src/fire_prox/fire_collection.py
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
def sum(self, field: str):
    """
    Compute the server-side sum of a numeric field.

    Phase 4 Part 5 feature. Delegates to FireQuery.sum(), so only the
    aggregation result travels over the wire — no document payloads
    are fetched.

    Args:
        field: Name of the numeric field to total.

    Returns:
        The sum of the field values (int or float).

    Example:
        total_revenue = orders.sum('amount')
    """
    from .fire_query import FireQuery

    # The bare collection reference doubles as a query for aggregation.
    return FireQuery(self._collection_ref, parent_collection=self).sum(field)

where(field, op, value)

Create a query with a filter condition.

Phase 2.5 feature. Builds a lightweight query for common filtering needs. For complex queries, users should use the native API and hydrate results with FireObject.from_snapshot().

Args: field: The field path to filter on (e.g., 'name', 'address.city'). op: Comparison operator: '==', '!=', '<', '<=', '>', '>=', 'in', 'not-in', 'array-contains', 'array-contains-any'. value: The value to compare against.

Returns: A FireQuery instance for method chaining.

Example: `query = users.where('birth_year', '>', 1800).where('country', '==', 'UK').limit(10)`; then iterate the results with `for user in query.get(): print(user.name)`.

Source code in src/fire_prox/fire_collection.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
def where(self, field: str, op: str, value: Any) -> 'FireQuery':
    """
    Start a filtered query over this collection.

    Phase 2.5 feature. Covers common filtering needs with a chainable,
    lightweight query object. For anything more elaborate, drop to the
    native client API and hydrate snapshots via
    FireObject.from_snapshot().

    Args:
        field: Field path to filter on (e.g. 'name', 'address.city').
        op: Comparison operator — one of '==', '!=', '<', '<=', '>',
            '>=', 'in', 'not-in', 'array-contains',
            'array-contains-any'.
        value: Value to compare the field against.

    Returns:
        A FireQuery supporting further chained refinements.

    Example:
        query = (users.where('birth_year', '>', 1800)
                      .where('country', '==', 'UK')
                      .limit(10))
        for user in query.get():
            print(user.name)
    """
    from google.cloud.firestore_v1.base_query import FieldFilter

    from .fire_query import FireQuery

    # Wrap the condition in a FieldFilter (the non-deprecated form of
    # the native where() API) and seed the chainable query.
    condition = FieldFilter(field, op, value)
    filtered = self._collection_ref.where(filter=condition)
    return FireQuery(filtered, parent_collection=self)

FireObject

Bases: BaseFireObject

A schemaless, state-aware proxy for a Firestore document (synchronous).

FireObject provides an object-oriented interface to Firestore documents, allowing attribute-style access to document fields and automatic state management throughout the document's lifecycle.

The object maintains an internal state machine (DETACHED -> ATTACHED -> LOADED -> DELETED) and tracks modifications to enable efficient partial updates.

This is the synchronous implementation that supports lazy loading via automatic fetch on attribute access.

Usage Examples: # Create a new document (DETACHED state) user = collection.new() user.name = 'Ada Lovelace' user.year = 1815 user.save() # Transitions to LOADED

# Load existing document (ATTACHED -> LOADED on access)
user = db.doc('users/alovelace')  # ATTACHED state
print(user.name)  # Triggers fetch, transitions to LOADED

# Update and save
user.year = 1816  # Marks as dirty
user.save()  # Performs update

# Delete
user.delete()  # Transitions to DELETED
Source code in src/fire_prox/fire_object.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
class FireObject(BaseFireObject):
    """
    A schemaless, state-aware proxy for a Firestore document (synchronous).

    FireObject provides an object-oriented interface to Firestore documents,
    allowing attribute-style access to document fields and automatic state
    management throughout the document's lifecycle.

    The object maintains an internal state machine (DETACHED -> ATTACHED ->
    LOADED -> DELETED) and tracks modifications to enable efficient partial
    updates.

    This is the synchronous implementation that supports lazy loading via
    automatic fetch on attribute access.

    Usage Examples:
        # Create a new document (DETACHED state)
        user = collection.new()
        user.name = 'Ada Lovelace'
        user.year = 1815
        user.save()  # Transitions to LOADED

        # Load existing document (ATTACHED -> LOADED on access)
        user = db.doc('users/alovelace')  # ATTACHED state
        print(user.name)  # Triggers fetch, transitions to LOADED

        # Update and save
        user.year = 1816  # Marks as dirty
        user.save()  # Performs update

        # Delete
        user.delete()  # Transitions to DELETED
    """

    # =========================================================================
    # Firestore I/O Hooks
    # =========================================================================

    def _get_snapshot(self, transaction: Optional[Any] = None) -> DocumentSnapshot:
        """Retrieve a document snapshot using the synchronous client."""
        if transaction is not None:
            return self._doc_ref.get(transaction=transaction)
        return self._doc_ref.get()

    def _create_document(self, doc_id: Optional[str] = None) -> DocumentReference:
        """Create a new synchronous document reference for DETACHED saves."""
        if not self._parent_collection:
            raise ValueError("DETACHED object has no parent collection")

        collection_ref = self._parent_collection._collection_ref
        # Custom ID when supplied; otherwise let Firestore auto-generate one.
        if doc_id:
            doc_ref = collection_ref.document(doc_id)
        else:
            doc_ref = collection_ref.document()

        # Bypass __setattr__ so the reference isn't treated as document data.
        object.__setattr__(self, '_doc_ref', doc_ref)
        return doc_ref

    def _write_set(
        self,
        data: Dict[str, Any],
        doc_ref: Optional[DocumentReference] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Persist data via a set call on the synchronous client."""
        target_ref = doc_ref or self._doc_ref

        # NOTE: transaction takes precedence if both transaction and batch
        # are supplied; only one write path is ever taken.
        if transaction is not None:
            transaction.set(target_ref, data)
        elif batch is not None:
            batch.set(target_ref, data)
        else:
            target_ref.set(data)

    def _write_update(
        self,
        update_dict: Dict[str, Any],
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Perform an update operation using the synchronous client."""
        # Same precedence as _write_set: transaction, then batch, then direct.
        if transaction is not None:
            transaction.update(self._doc_ref, update_dict)
        elif batch is not None:
            batch.update(self._doc_ref, update_dict)
        else:
            self._doc_ref.update(update_dict)

    def _write_delete(self, batch: Optional[Any] = None) -> None:
        """Delete the document using the synchronous client."""
        if batch is not None:
            batch.delete(self._doc_ref)
        else:
            self._doc_ref.delete()

    # =========================================================================
    # Dynamic Attribute Handling (Sync-specific for lazy loading)
    # =========================================================================

    def __getattr__(self, name: str) -> Any:
        """
        Handle attribute access for document fields with lazy loading.

        This method implements lazy loading: if the object is in ATTACHED state,
        accessing any data attribute will automatically trigger a fetch() to load
        the data from Firestore.

        Args:
            name: The attribute name being accessed.

        Returns:
            The value of the field from the internal _data cache.

        Raises:
            AttributeError: If the attribute doesn't exist in _data after
                           fetching (if necessary).

        State Transitions:
            ATTACHED -> LOADED: Automatically fetches data on first access.

        Example:
            user = db.doc('users/alovelace')  # ATTACHED
            name = user.name  # Triggers fetch, transitions to LOADED
            year = user.year  # No fetch needed, already LOADED
        """
        # Check if we're accessing internal data
        # (guards against infinite recursion before _data is initialized)
        if name == '_data':
            raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")

        # If we're in ATTACHED state, trigger lazy loading
        if self._state == State.ATTACHED:
            # Synchronous fetch for lazy loading
            self.fetch()

        return self._materialize_field(name)

    # =========================================================================
    # Core Lifecycle Methods (Sync-specific I/O)
    # =========================================================================

    def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'FireObject':
        """
        Fetch document data from Firestore (synchronous).

        Retrieves the latest data from Firestore and populates the internal
        _data cache. This method transitions ATTACHED objects to LOADED state
        and can refresh data for already-LOADED objects.

        Args:
            force: If True, fetch data even if already LOADED. Useful for
                  refreshing data to get latest changes from Firestore.
                  Default is False.
            transaction: Optional transaction object for transactional reads.
                        If provided, the read will be part of the transaction.

        Returns:
            Self, to allow method chaining.

        Raises:
            ValueError: If called on a DETACHED object (no DocumentReference).
            RuntimeError: If called on a DELETED object.
            NotFound: If document doesn't exist in Firestore.

        State Transitions:
            ATTACHED -> LOADED: First fetch populates data
            LOADED -> LOADED: Refreshes data if force=True

        Example:
            # Normal fetch
            user = db.doc('users/alovelace')  # ATTACHED
            user.fetch()  # Now LOADED with data

            # Transactional fetch
            transaction = db.transaction()
            @firestore.transactional
            def read_user(transaction):
                user.fetch(transaction=transaction)
                return user.credits
            credits = read_user(transaction)
        """
        # _should_skip_fetch also performs the state validations listed above.
        if self._should_skip_fetch(force):
            return self

        snapshot = self._get_snapshot(transaction)
        self._process_snapshot(snapshot, is_async=False)

        return self

    def save(
        self,
        doc_id: Optional[str] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> 'FireObject':
        """
        Save the object's data to Firestore (synchronous).

        Creates or updates the Firestore document based on the object's
        current state. For DETACHED objects, creates a new document. For
        LOADED objects, performs a full overwrite (Phase 1).

        Args:
            doc_id: Optional custom document ID. Only used when saving a
                   DETACHED object. If None, Firestore auto-generates an ID.
            transaction: Optional transaction object for transactional writes.
                        If provided, the write will be part of the transaction.
            batch: Optional batch object for batched writes. If provided,
                  the write will be accumulated in the batch (committed later).

        Returns:
            Self, to allow method chaining.

        Raises:
            RuntimeError: If called on a DELETED object.
            ValueError: If DETACHED object has no parent collection, or if
                       trying to create a new document within a transaction or batch.

        State Transitions:
            DETACHED -> LOADED: Creates new document with doc_id or auto-ID
            LOADED -> LOADED: Updates document if dirty, no-op if clean

        Example:
            # Create new document
            user = collection.new()
            user.name = 'Ada'
            user.save(doc_id='alovelace')  # DETACHED -> LOADED

            # Update existing
            user.year = 1816
            user.save()  # Performs update

            # Transactional save
            transaction = db.transaction()
            @firestore.transactional
            def update_user(transaction):
                user.fetch(transaction=transaction)
                user.credits += 10
                user.save(transaction=transaction)
            update_user(transaction)

            # Batch save
            batch = db.batch()
            user1.save(batch=batch)
            user2.save(batch=batch)
            batch.commit()  # Commit all operations
        """
        self._validate_not_deleted("save()")

        # DETACHED: create a brand-new document. _prepare_detached_save is
        # expected to reject transaction/batch for new documents (see Raises).
        if self._state == State.DETACHED:
            doc_ref, storage_data = self._prepare_detached_save(doc_id, transaction, batch)
            self._write_set(storage_data, doc_ref=doc_ref)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        # LOADED: partial update of only the dirty fields; no-op when clean.
        if self._state == State.LOADED:
            if not self.is_dirty():
                return self

            update_dict = self._build_update_dict()
            self._write_update(update_dict, transaction=transaction, batch=batch)
            self._mark_clean()
            return self

        # ATTACHED (data never fetched): write local fields as a full set.
        if self._state == State.ATTACHED:
            storage_data = self._prepare_data_for_storage()
            self._write_set(storage_data, transaction=transaction, batch=batch)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        return self

    def collections(self, names_only: bool = False) -> List[Any]:
        """
        List subcollections beneath this document.

        Args:
            names_only: When True, return collection IDs instead of wrappers.

        Returns:
            List of subcollection names or FireCollection wrappers.

        Raises:
            ValueError: If called on a DETACHED object.
            RuntimeError: If called on a DELETED object.
        """
        self._validate_not_detached("collections()")
        self._validate_not_deleted("collections()")

        subcollections = list(self._doc_ref.collections())
        if names_only:
            return [col.id for col in subcollections]

        return [self.collection(col.id) for col in subcollections]

    def delete(
        self,
        batch: Optional[Any] = None,
        *,
        recursive: bool = True,
        batch_size: int = 50,
    ) -> None:
        """
        Delete the document from Firestore (synchronous).

        Removes the document from Firestore and transitions the object to
        DELETED state. After deletion, the object retains its ID and path
        for reference but cannot be modified or saved.

        Args:
            batch: Optional batch object for batched deletes. If provided,
                  the delete will be accumulated in the batch (committed later).
            recursive: When True (default), delete all subcollections first.
            batch_size: Batch size to use for recursive subcollection cleanup.

        Raises:
            ValueError: If called on a DETACHED object (no document to delete).
            RuntimeError: If called on an already-DELETED object.
            ValueError: If recursive deletion is requested while using a batch.

        State Transitions:
            ATTACHED -> DELETED: Deletes document (data never loaded)
            LOADED -> DELETED: Deletes document (data was loaded)

        Example:
            user = db.doc('users/alovelace')
            user.delete()  # Document removed from Firestore
            print(user.state)  # State.DELETED
            print(user.id)  # Still accessible: 'alovelace'

            # Batch delete
            batch = db.batch()
            user1.delete(batch=batch, recursive=False)
            user2.delete(batch=batch, recursive=False)
            batch.commit()  # Commit all operations
        """
        if recursive:
            # Recursive cleanup commits its own deletes, so it cannot be
            # deferred into a caller-managed batch.
            if batch is not None:
                raise ValueError("Cannot delete recursively as part of a batch.")
            if batch_size <= 0:
                raise ValueError(f"batch_size must be positive, got {batch_size}")
            self._delete_descendant_collections(batch_size=batch_size)

        self._prepare_delete()
        self._write_delete(batch=batch)
        self._transition_to_deleted()

    def _delete_descendant_collections(self, batch_size: int) -> None:
        """Delete all subcollections beneath this document."""
        for name in self.collections(names_only=True):
            subcollection = self.collection(name)
            subcollection.delete_all(batch_size=batch_size, recursive=True)

    # =========================================================================
    # Subcollection Utilities
    # =========================================================================

    def delete_subcollection(
        self,
        name: str,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete a subcollection beneath this document.

        Firestore keeps subcollections even after their parent document is
        deleted. This helper clears a specific subcollection using the same
        batched logic as FireCollection.delete_all().

        Args:
            name: Subcollection name relative to this document.
            batch_size: Maximum number of deletes per commit.
            recursive: Whether to delete nested subcollections.
            dry_run: Count affected documents without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections.
        """
        subcollection = self.collection(name)
        return subcollection.delete_all(
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
        )

    # =========================================================================
    # Factory Methods
    # =========================================================================

    @classmethod
    def from_snapshot(
        cls,
        snapshot: DocumentSnapshot,
        parent_collection: Optional[Any] = None
    ) -> 'FireObject':
        """
        Create a FireObject from a Firestore DocumentSnapshot.

        This factory method is the primary "hydration" mechanism for
        converting native Firestore query results into FireObject instances.
        It creates an object in LOADED state with data already populated.

        Args:
            snapshot: A DocumentSnapshot from google-cloud-firestore, typically
                     obtained from query results or document.get().
            parent_collection: Optional reference to parent FireCollection.

        Returns:
            A new FireObject instance in LOADED state with data from snapshot.

        Raises:
            ValueError: If snapshot doesn't exist (snapshot.exists is False).

        Example:
            # Hydrate from native query
            native_query = client.collection('users').where('year', '>', 1800)
            results = [FireObject.from_snapshot(snap)
                      for snap in native_query.stream()]

            # Hydrate from direct get
            snap = client.document('users/alovelace').get()
            user = FireObject.from_snapshot(snap)
        """
        # Use base class helper to extract snapshot data
        init_params = cls._create_from_snapshot_base(snapshot, parent_collection)

        # Create FireObject in LOADED state
        obj = cls(
            doc_ref=init_params['doc_ref'],
            initial_state=init_params['initial_state'],
            parent_collection=init_params['parent_collection']
        )

        # Populate data from snapshot
        # (bypasses __setattr__ so fields are not flagged as dirty)
        object.__setattr__(obj, '_data', init_params['data'])

        return obj

__getattr__(name)

Handle attribute access for document fields with lazy loading.

This method implements lazy loading: if the object is in ATTACHED state, accessing any data attribute will automatically trigger a fetch() to load the data from Firestore.

Args: name: The attribute name being accessed.

Returns: The value of the field from the internal _data cache.

Raises: AttributeError: If the attribute doesn't exist in _data after fetching (if necessary).

State Transitions: ATTACHED -> LOADED: Automatically fetches data on first access.

Example: `user = db.doc('users/alovelace')` leaves the object ATTACHED; accessing `user.name` triggers a fetch and transitions it to LOADED; a later access such as `user.year` needs no further fetch.

Source code in src/fire_prox/fire_object.py
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
def __getattr__(self, name: str) -> Any:
    """
    Resolve document-field access, lazily fetching when required.

    Python only invokes __getattr__ when normal lookup fails, so real
    attributes and methods are untouched. For an object still in
    ATTACHED state, the first field access triggers a synchronous
    fetch() (ATTACHED -> LOADED); afterwards values are served from
    the cached _data dict without further round-trips.

    Args:
        name: The field name being accessed.

    Returns:
        The materialized field value from the internal _data cache.

    Raises:
        AttributeError: If the field is still absent after any
            required fetch.

    Example:
        user = db.doc('users/alovelace')  # ATTACHED
        name = user.name  # fetches, transitions to LOADED
        year = user.year  # served from cache
    """
    # _data itself must never route through lazy loading — doing so
    # would recurse before the cache exists.
    if name == '_data':
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")

    if self._state == State.ATTACHED:
        self.fetch()  # lazy load on first field access

    return self._materialize_field(name)

collections(names_only=False)

List subcollections beneath this document.

Args: names_only: When True, return collection IDs instead of wrappers.

Returns: List of subcollection names or FireCollection wrappers.

Source code in src/fire_prox/fire_object.py
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
def collections(self, names_only: bool = False) -> List[Any]:
    """
    Enumerate the subcollections nested under this document.

    Args:
        names_only: If True, return bare collection IDs instead of
            FireCollection wrapper objects.

    Returns:
        A list of subcollection IDs (names_only=True) or wrappers.
    """
    self._validate_not_detached("collections()")
    self._validate_not_deleted("collections()")

    # Materialize the IDs once, then wrap them only when requested.
    ids = [ref.id for ref in self._doc_ref.collections()]
    if names_only:
        return ids
    return [self.collection(collection_id) for collection_id in ids]

delete(batch=None, *, recursive=True, batch_size=50)

Delete the document from Firestore (synchronous).

Removes the document from Firestore and transitions the object to DELETED state. After deletion, the object retains its ID and path for reference but cannot be modified or saved.

Args: batch: Optional batch object for batched deletes. If provided, the delete will be accumulated in the batch (committed later). recursive: When True (default), delete all subcollections first. batch_size: Batch size to use for recursive subcollection cleanup.

Raises: ValueError: If called on a DETACHED object (no document to delete). RuntimeError: If called on an already-DELETED object. ValueError: If recursive deletion is requested while using a batch.

State Transitions: ATTACHED -> DELETED: Deletes document (data never loaded) LOADED -> DELETED: Deletes document (data was loaded)

Example: user = db.doc('users/alovelace') user.delete() # Document removed from Firestore print(user.state) # State.DELETED print(user.id) # Still accessible: 'alovelace'

# Batch delete
batch = db.batch()
user1.delete(batch=batch, recursive=False)
user2.delete(batch=batch, recursive=False)
batch.commit()  # Commit all operations
Source code in src/fire_prox/fire_object.py
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
def delete(
    self,
    batch: Optional[Any] = None,
    *,
    recursive: bool = True,
    batch_size: int = 50,
) -> None:
    """
    Remove this document from Firestore (synchronous).

    Transitions the object to DELETED state. The ID and path stay
    readable afterwards for reference, but the object can no longer
    be modified or saved. Because Firestore does not delete
    subcollections automatically, they are purged first by default.

    Args:
        batch: Accumulate the delete into this write batch instead of
            executing immediately (the caller commits later).
        recursive: Purge all subcollections before deleting the
            document itself (default True).
        batch_size: Commit size used while clearing subcollections.

    Raises:
        ValueError: If called on a DETACHED object, if recursive
            deletion is combined with a batch, or if batch_size is
            not positive.
        RuntimeError: If the object is already DELETED.

    State Transitions:
        ATTACHED -> DELETED (data never loaded)
        LOADED -> DELETED (data was loaded)

    Example:
        user = db.doc('users/alovelace')
        user.delete()
        print(user.state)  # State.DELETED
        print(user.id)     # Still accessible: 'alovelace'

        # Batched (non-recursive) deletes
        batch = db.batch()
        user1.delete(batch=batch, recursive=False)
        user2.delete(batch=batch, recursive=False)
        batch.commit()
    """
    # Recursive cleanup issues its own commits and therefore cannot be
    # folded into a caller-managed batch.
    if recursive and batch is not None:
        raise ValueError("Cannot delete recursively as part of a batch.")
    if recursive:
        if batch_size <= 0:
            raise ValueError(f"batch_size must be positive, got {batch_size}")
        self._delete_descendant_collections(batch_size=batch_size)

    self._prepare_delete()
    self._write_delete(batch=batch)
    self._transition_to_deleted()

delete_subcollection(name, *, batch_size=50, recursive=True, dry_run=False)

Delete a subcollection beneath this document.

Firestore keeps subcollections even after their parent document is deleted. This helper clears a specific subcollection using the same batched logic as FireCollection.delete_all().

Args: name: Subcollection name relative to this document. batch_size: Maximum number of deletes per commit. recursive: Whether to delete nested subcollections. dry_run: Count affected documents without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections.

Source code in src/fire_prox/fire_object.py
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
def delete_subcollection(
    self,
    name: str,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Purge one named subcollection beneath this document.

    Firestore retains subcollections even after their parent document
    is deleted; this helper clears the requested one using the same
    batched machinery as FireCollection.delete_all().

    Args:
        name: Subcollection ID relative to this document.
        batch_size: Maximum number of deletes committed per batch.
        recursive: Also clear nested subcollections.
        dry_run: Only count affected documents; perform no writes.

    Returns:
        Dictionary of counts for deleted documents and subcollections.
    """
    return self.collection(name).delete_all(
        batch_size=batch_size,
        recursive=recursive,
        dry_run=dry_run,
    )

fetch(force=False, transaction=None)

Fetch document data from Firestore (synchronous).

Retrieves the latest data from Firestore and populates the internal _data cache. This method transitions ATTACHED objects to LOADED state and can refresh data for already-LOADED objects.

Args: force: If True, fetch data even if already LOADED. Useful for refreshing data to get latest changes from Firestore. Default is False. transaction: Optional transaction object for transactional reads. If provided, the read will be part of the transaction.

Returns: Self, to allow method chaining.

Raises: ValueError: If called on a DETACHED object (no DocumentReference). RuntimeError: If called on a DELETED object. NotFound: If document doesn't exist in Firestore.

State Transitions: ATTACHED -> LOADED: First fetch populates data LOADED -> LOADED: Refreshes data if force=True

Example: # Normal fetch user = db.doc('users/alovelace') # ATTACHED user.fetch() # Now LOADED with data

# Transactional fetch
transaction = db.transaction()
@firestore.transactional
def read_user(transaction):
    user.fetch(transaction=transaction)
    return user.credits
credits = read_user(transaction)
Source code in src/fire_prox/fire_object.py
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'FireObject':
    """
    Retrieve the document's data from Firestore (synchronous).

    Populates the internal _data cache from the latest server snapshot,
    moving an ATTACHED object into the LOADED state. Already-LOADED
    objects are refreshed only when ``force`` is True.

    Args:
        force: Re-fetch even when the object is already LOADED. Useful
            for picking up the latest remote changes. Default is False.
        transaction: Optional transaction object for transactional reads;
            when given, the read participates in that transaction.

    Returns:
        Self, enabling method chaining.

    Raises:
        ValueError: If called on a DETACHED object (no DocumentReference).
        RuntimeError: If called on a DELETED object.
        NotFound: If the document does not exist in Firestore.

    State Transitions:
        ATTACHED -> LOADED: first fetch populates data.
        LOADED -> LOADED: refreshes data when force=True.

    Example:
        user = db.doc('users/alovelace')  # ATTACHED
        user.fetch()                      # now LOADED with data

        # Transactional fetch
        transaction = db.transaction()
        @firestore.transactional
        def read_user(transaction):
            user.fetch(transaction=transaction)
            return user.credits
        credits = read_user(transaction)
    """
    # Honour the cached state unless the caller explicitly forces a refresh.
    if not self._should_skip_fetch(force):
        snapshot = self._get_snapshot(transaction)
        self._process_snapshot(snapshot, is_async=False)
    return self

from_snapshot(snapshot, parent_collection=None) classmethod

Create a FireObject from a Firestore DocumentSnapshot.

This factory method is the primary "hydration" mechanism for converting native Firestore query results into FireObject instances. It creates an object in LOADED state with data already populated.

Args: snapshot: A DocumentSnapshot from google-cloud-firestore, typically obtained from query results or document.get(). parent_collection: Optional reference to parent FireCollection.

Returns: A new FireObject instance in LOADED state with data from snapshot.

Raises: ValueError: If snapshot doesn't exist (snapshot.exists is False).

Example: # Hydrate from native query native_query = client.collection('users').where('year', '>', 1800) results = [FireObject.from_snapshot(snap) for snap in native_query.stream()]

# Hydrate from direct get
snap = client.document('users/alovelace').get()
user = FireObject.from_snapshot(snap)
Source code in src/fire_prox/fire_object.py
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
@classmethod
def from_snapshot(
    cls,
    snapshot: DocumentSnapshot,
    parent_collection: Optional[Any] = None
) -> 'FireObject':
    """
    Build a FireObject from a native Firestore DocumentSnapshot.

    This is the primary "hydration" path for converting native query
    results into FireObject instances: the returned object is already in
    LOADED state with the snapshot's data populated.

    Args:
        snapshot: A DocumentSnapshot from google-cloud-firestore, as
            produced by query results or document.get().
        parent_collection: Optional reference to the parent FireCollection.

    Returns:
        A new FireObject in LOADED state populated from ``snapshot``.

    Raises:
        ValueError: If ``snapshot.exists`` is False.

    Example:
        # Hydrate from a native query
        native_query = client.collection('users').where('year', '>', 1800)
        results = [FireObject.from_snapshot(snap)
                  for snap in native_query.stream()]

        # Hydrate from a direct get
        snap = client.document('users/alovelace').get()
        user = FireObject.from_snapshot(snap)
    """
    # Shared base-class helper extracts doc_ref/state/data from the snapshot.
    params = cls._create_from_snapshot_base(snapshot, parent_collection)

    instance = cls(
        doc_ref=params['doc_ref'],
        initial_state=params['initial_state'],
        parent_collection=params['parent_collection'],
    )

    # Bypass attribute interception when seeding the data cache.
    object.__setattr__(instance, '_data', params['data'])
    return instance

save(doc_id=None, transaction=None, batch=None)

Save the object's data to Firestore (synchronous).

Creates or updates the Firestore document based on the object's current state. For DETACHED objects, creates a new document. For LOADED objects, performs a full overwrite (Phase 1).

Args:
    doc_id: Optional custom document ID. Only used when saving a DETACHED object. If None, Firestore auto-generates an ID.
    transaction: Optional transaction object for transactional writes. If provided, the write will be part of the transaction.
    batch: Optional batch object for batched writes. If provided, the write will be accumulated in the batch (committed later).

Returns: Self, to allow method chaining.

Raises: RuntimeError: If called on a DELETED object. ValueError: If DETACHED object has no parent collection, or if trying to create a new document within a transaction or batch.

State Transitions: DETACHED -> LOADED: Creates new document with doc_id or auto-ID LOADED -> LOADED: Updates document if dirty, no-op if clean

Example: # Create new document user = collection.new() user.name = 'Ada' user.save(doc_id='alovelace') # DETACHED -> LOADED

# Update existing
user.year = 1816
user.save()  # Performs update

# Transactional save
transaction = db.transaction()
@firestore.transactional
def update_user(transaction):
    user.fetch(transaction=transaction)
    user.credits += 10
    user.save(transaction=transaction)
update_user(transaction)

# Batch save
batch = db.batch()
user1.save(batch=batch)
user2.save(batch=batch)
batch.commit()  # Commit all operations
Source code in src/fire_prox/fire_object.py
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
def save(
    self,
    doc_id: Optional[str] = None,
    transaction: Optional[Any] = None,
    batch: Optional[Any] = None,
) -> 'FireObject':
    """
    Persist the object's data to Firestore (synchronous).

    DETACHED objects are created as new documents; ATTACHED objects are
    written with a full set; LOADED objects receive an update only when
    dirty (saving a clean LOADED object is a no-op).

    Args:
        doc_id: Custom document ID, honoured only when saving a DETACHED
            object; when None, Firestore auto-generates an ID.
        transaction: Optional transaction the write should join.
        batch: Optional write batch the write should be accumulated in
            (committed later by the caller).

    Returns:
        Self, enabling method chaining.

    Raises:
        RuntimeError: If called on a DELETED object.
        ValueError: If a DETACHED object has no parent collection, or if
            a new document is created inside a transaction or batch.

    State Transitions:
        DETACHED -> LOADED: creates a new document with doc_id or auto-ID.
        LOADED -> LOADED: updates the document if dirty, no-op if clean.

    Example:
        # Create new document
        user = collection.new()
        user.name = 'Ada'
        user.save(doc_id='alovelace')  # DETACHED -> LOADED

        # Update existing
        user.year = 1816
        user.save()

        # Transactional save
        transaction = db.transaction()
        @firestore.transactional
        def update_user(transaction):
            user.fetch(transaction=transaction)
            user.credits += 10
            user.save(transaction=transaction)
        update_user(transaction)

        # Batch save
        batch = db.batch()
        user1.save(batch=batch)
        user2.save(batch=batch)
        batch.commit()
    """
    self._validate_not_deleted("save()")

    state = self._state
    if state == State.DETACHED:
        # Creation path: resolve the target reference, then a full set.
        doc_ref, storage_data = self._prepare_detached_save(doc_id, transaction, batch)
        self._write_set(storage_data, doc_ref=doc_ref)
    elif state == State.LOADED:
        # Update path: skip the round-trip entirely when nothing changed.
        if not self.is_dirty():
            return self
        self._write_update(self._build_update_dict(), transaction=transaction, batch=batch)
        self._mark_clean()
        return self
    elif state == State.ATTACHED:
        # Never fetched: overwrite whatever is remote with local data.
        self._write_set(self._prepare_data_for_storage(), transaction=transaction, batch=batch)
    else:
        # Unknown state: nothing to persist.
        return self

    # Shared tail for the DETACHED and ATTACHED paths.
    object.__setattr__(self, '_state', State.LOADED)
    self._mark_clean()
    return self

FireProx

Bases: BaseFireProx

Main entry point for the FireProx library (synchronous).

FireProx wraps the native google-cloud-firestore Client and provides a simplified, Pythonic interface for working with Firestore. It delegates authentication and client configuration to the official library while providing higher-level abstractions for document and collection access.

The design philosophy is "wrap, don't replace" - FireProx leverages the reliability and security of the native client while providing a more intuitive developer experience optimized for rapid prototyping.

This is the synchronous implementation that supports lazy loading.

Usage Examples: # Initialize with a pre-configured native client from google.cloud import firestore from fire_prox import FireProx

native_client = firestore.Client(project='my-project')
db = FireProx(native_client)

# Access a document (ATTACHED state, lazy loading)
user = db.doc('users/alovelace')
print(user.name)  # Automatically fetches data

# Create a new document
users = db.collection('users')
new_user = users.new()
new_user.name = 'Charles Babbage'
new_user.year = 1791
new_user.save()

# Update a document
user = db.doc('users/alovelace')
user.year = 1816
user.save()

# Delete a document
user.delete()
Source code in src/fire_prox/fireprox.py
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
class FireProx(BaseFireProx):
    """
    Synchronous entry point for the FireProx library.

    Wraps a native google-cloud-firestore Client with a simplified,
    Pythonic interface. Authentication and client configuration remain
    the job of the official library ("wrap, don't replace"); FireProx
    layers higher-level document and collection access — including lazy
    loading — on top of it for rapid prototyping.

    Usage Examples:
        from google.cloud import firestore
        from fire_prox import FireProx

        db = FireProx(firestore.Client(project='my-project'))

        # Access a document (ATTACHED state, lazy loading)
        user = db.doc('users/alovelace')
        print(user.name)  # Automatically fetches data

        # Create a new document
        users = db.collection('users')
        new_user = users.new()
        new_user.name = 'Charles Babbage'
        new_user.year = 1791
        new_user.save()

        # Update a document
        user = db.doc('users/alovelace')
        user.year = 1816
        user.save()

        # Delete a document
        user.delete()
    """

    def __init__(self, client: FirestoreClient):
        """
        Wrap a pre-configured native Firestore client.

        Args:
            client: A google.cloud.firestore.Client instance. Credentials
                and project selection are handled before construction
                (default credentials, explicit project, or
                Client.from_service_account_json).

        Raises:
            TypeError: If ``client`` is not a native sync Client.
        """
        # Reject async or unrelated client types up front.
        if not isinstance(client, FirestoreClient):
            raise TypeError(
                f"client must be a google.cloud.firestore.Client, got {type(client)}"
            )
        super().__init__(client)

    # =========================================================================
    # Document Access
    # =========================================================================

    def doc(self, path: str) -> FireObject:
        """
        Return an ATTACHED FireObject for the document at ``path``.

        Nothing is read from Firestore until an attribute is accessed
        (lazy loading).

        Args:
            path: Full document path with an even number of segments,
                e.g. 'users/alovelace' or 'users/uid/posts/post123'.

        Returns:
            A FireObject in ATTACHED state.

        Raises:
            ValueError: For an invalid document path (odd segment count
                or illegal characters).

        Example:
            user = db.doc('users/alovelace')
            post = db.doc('users/alovelace/posts/post123')
            print(user.name)  # Triggers fetch on first access
        """
        return self._create_document_proxy(path, FireObject)

    def document(self, path: str) -> FireObject:
        """
        Alias for doc(); kept for parity with the native client API.

        Args:
            path: Full document path.

        Returns:
            A FireObject in ATTACHED state.
        """
        return self.doc(path)

    # =========================================================================
    # Collection Access
    # =========================================================================

    def collection(self, path: str) -> FireCollection:
        """
        Return a FireCollection wrapper for the collection at ``path``.

        The wrapper sits over the native CollectionReference and is used
        for creating new documents and building queries.

        Args:
            path: Collection path, e.g. 'users' or 'users/uid/posts';
                root-level collections have an odd number of segments.

        Returns:
            A FireCollection instance.

        Raises:
            ValueError: For an invalid collection path (even segment
                count or illegal characters).

        Example:
            users = db.collection('users')
            new_user = users.new()
            new_user.name = 'Ada'
            new_user.save()
        """
        return self._create_collection_proxy(path, FireCollection)

    def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
        """
        List the subcollections beneath the document at ``path``.

        Args:
            path: Document path whose subcollections should be listed.
            names_only: When True, return collection IDs instead of
                FireCollection wrappers.

        Returns:
            List of subcollection names or FireCollection wrappers.
        """
        # Delegate to the document proxy's own subcollection listing.
        return self.doc(path).collections(names_only=names_only)
__init__(client)

Initialize FireProx with a native Firestore client.

Args: client: A configured google.cloud.firestore.Client instance. Authentication and project configuration should be handled before creating this instance.

Raises: TypeError: If client is not a google.cloud.firestore.Client instance.

Example: from google.cloud import firestore from fire_prox import FireProx

# Option 1: Default credentials
native_client = firestore.Client()

# Option 2: Explicit project
native_client = firestore.Client(project='my-project-id')

# Option 3: Service account
native_client = firestore.Client.from_service_account_json(
    'path/to/credentials.json'
)

# Initialize FireProx
db = FireProx(native_client)
Source code in src/fire_prox/fireprox.py
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
def __init__(self, client: FirestoreClient):
    """
    Wrap a pre-configured native Firestore client.

    FireProx delegates authentication and project configuration to the
    official library: pass in any already-configured sync Client, whether
    built from default credentials, an explicit project ID, or
    Client.from_service_account_json().

    Args:
        client: A google.cloud.firestore.Client instance.

    Raises:
        TypeError: If ``client`` is not a native sync Client.

    Example:
        from google.cloud import firestore
        from fire_prox import FireProx

        db = FireProx(firestore.Client(project='my-project-id'))
    """
    # Accept only the synchronous native client; anything else is a
    # programming error surfaced immediately.
    if isinstance(client, FirestoreClient):
        super().__init__(client)
    else:
        raise TypeError(
            f"client must be a google.cloud.firestore.Client, got {type(client)}"
        )

collection(path)

Get a reference to a collection by its path.

Creates a FireCollection wrapper around the native CollectionReference. Used for creating new documents or (in Phase 2) querying.

Args: path: The collection path, e.g., 'users' or 'users/uid/posts'. Can be a root-level collection (odd number of segments) or a subcollection path.

Returns: A FireCollection instance.

Raises: ValueError: If path has an even number of segments (invalid collection path) or contains invalid characters.

Example: # Root-level collection users = db.collection('users') new_user = users.new() new_user.name = 'Ada' new_user.save()

# Subcollection
posts = db.collection('users/alovelace/posts')
new_post = posts.new()
new_post.title = 'Analysis Engine'
new_post.save()
Source code in src/fire_prox/fireprox.py
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
def collection(self, path: str) -> FireCollection:
    """
    Return a FireCollection wrapper for the collection at ``path``.

    The wrapper sits over the native CollectionReference and is used for
    creating new documents and building queries.

    Args:
        path: Collection path, e.g. 'users' or 'users/uid/posts'; a
            root-level collection has an odd number of segments.

    Returns:
        A FireCollection instance.

    Raises:
        ValueError: For an invalid collection path (even segment count
            or illegal characters).

    Example:
        users = db.collection('users')
        new_user = users.new()
        new_user.name = 'Ada'
        new_user.save()

        posts = db.collection('users/alovelace/posts')
        new_post = posts.new()
        new_post.title = 'Analysis Engine'
        new_post.save()
    """
    proxy = self._create_collection_proxy(path, FireCollection)
    return proxy

collections(path, *, names_only=False)

List subcollections beneath the specified document path.

Args: path: Document path whose subcollections should be listed. names_only: Return collection IDs instead of FireCollection wrappers.

Returns: List of subcollection names or FireCollection wrappers.

Source code in src/fire_prox/fireprox.py
187
188
189
190
191
192
193
194
195
196
197
198
199
def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
    """
    List the subcollections beneath the document at ``path``.

    Args:
        path: Document path whose subcollections should be listed.
        names_only: When True, return collection IDs rather than
            FireCollection wrappers.

    Returns:
        List of subcollection names or FireCollection wrappers.
    """
    # Delegate to the document proxy's own subcollection listing.
    return self.doc(path).collections(names_only=names_only)

doc(path)

Get a reference to a document by its full path.

Creates a FireObject in ATTACHED state. No data is fetched from Firestore until an attribute is accessed (lazy loading).

Args: path: The full document path, e.g., 'users/alovelace' or 'users/uid/posts/post123'. Must be a valid Firestore document path with an even number of segments.

Returns: A FireObject instance in ATTACHED state.

Raises: ValueError: If path has an odd number of segments (invalid document path) or contains invalid characters.

Example: # Root-level document user = db.doc('users/alovelace')

# Nested document (subcollection)
post = db.doc('users/alovelace/posts/post123')

# Lazy loading
print(user.name)  # Triggers fetch on first access
Source code in src/fire_prox/fireprox.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
def doc(self, path: str) -> FireObject:
    """
    Return an ATTACHED FireObject for the document at ``path``.

    Nothing is read from Firestore until an attribute is accessed
    (lazy loading).

    Args:
        path: Full document path with an even number of segments, e.g.
            'users/alovelace' or 'users/uid/posts/post123'.

    Returns:
        A FireObject in ATTACHED state.

    Raises:
        ValueError: For an invalid document path (odd segment count or
            illegal characters).

    Example:
        user = db.doc('users/alovelace')
        post = db.doc('users/alovelace/posts/post123')
        print(user.name)  # Triggers fetch on first access
    """
    return self._create_document_proxy(path, FireObject)

document(path)

Alias for doc(). Get a reference to a document by its full path.

Provided for API consistency with the native library and user preference. Functionally identical to doc().

Args: path: The full document path.

Returns: A FireObject instance in ATTACHED state.

Source code in src/fire_prox/fireprox.py
134
135
136
137
138
139
140
141
142
143
144
145
146
147
def document(self, path: str) -> FireObject:
    """
    Alias for doc(); kept for parity with the native client API.

    Functionally identical to doc().

    Args:
        path: Full document path.

    Returns:
        A FireObject in ATTACHED state.
    """
    return self.doc(path)

FireQuery

A chainable query builder for Firestore collections (synchronous).

FireQuery wraps the native google-cloud-firestore Query object and provides a simplified, chainable interface for building and executing queries. It follows an immutable pattern - each method returns a new FireQuery instance with the modified query.

This is the synchronous implementation. For async queries, use AsyncFireQuery.

Usage Examples: # Basic filtering query = users.where('birth_year', '>', 1800) for user in query.get(): print(user.name)

# Chaining multiple conditions
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England')
         .order_by('birth_year')
         .limit(10))
for user in query.get():
    print(f"{user.name} - {user.birth_year}")

# Stream results (generator)
for user in users.where('active', '==', True).stream():
    print(user.name)

Design Note: For complex queries beyond the scope of this builder (e.g., OR queries, advanced filtering), use the native Query API directly and hydrate results with FireObject.from_snapshot():

    native_query = client.collection('users').where(...)
    results = [FireObject.from_snapshot(snap) for snap in native_query.stream()]
Source code in src/fire_prox/fire_query.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
class FireQuery:
    """
    A chainable query builder for Firestore collections (synchronous).

    FireQuery wraps the native google-cloud-firestore Query object and provides
    a simplified, chainable interface for building and executing queries. It
    follows an immutable pattern - each method returns a new FireQuery instance
    with the modified query.

    This is the synchronous implementation. For async queries, use AsyncFireQuery.

    Usage Examples:
        # Basic filtering
        query = users.where('birth_year', '>', 1800)
        for user in query.get():
            print(user.name)

        # Chaining multiple conditions
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England')
                 .order_by('birth_year')
                 .limit(10))
        for user in query.get():
            print(f"{user.name} - {user.birth_year}")

        # Stream results (generator)
        for user in users.where('active', '==', True).stream():
            print(user.name)

    Design Note:
        For complex queries beyond the scope of this builder (e.g., OR queries,
        advanced filtering), use the native Query API directly and hydrate results
        with FireObject.from_snapshot():

            native_query = client.collection('users').where(...)
            results = [FireObject.from_snapshot(snap) for snap in native_query.stream()]
    """

    def __init__(
        self,
        native_query: Query,
        parent_collection: Optional[Any] = None,
        projection: Optional[tuple] = None,
    ):
        """
        Initialize a FireQuery.

        Args:
            native_query: The underlying native Query object from google-cloud-firestore.
            parent_collection: Optional reference to parent FireCollection.
            projection: Optional tuple of field paths to project (select specific fields).
        """
        self._query = native_query
        self._parent_collection = parent_collection
        self._projection = projection

    # =========================================================================
    # Query Building Methods (Immutable Pattern)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'FireQuery':
        """
        Add a filter condition to the query.

        Creates a new FireQuery with an additional filter condition.
        Uses the immutable pattern - returns a new instance rather than
        modifying the current query.

        Args:
            field: The field path to filter on (e.g., 'name', 'address.city').
            op: Comparison operator. Supported operators:
                '==' (equal), '!=' (not equal),
                '<' (less than), '<=' (less than or equal),
                '>' (greater than), '>=' (greater than or equal),
                'in' (value in list), 'not-in' (value not in list),
                'array-contains' (array contains value),
                'array-contains-any' (array contains any of the values).
            value: The value to compare against.

        Returns:
            A new FireQuery instance with the added filter.

        Example:
            # Single condition
            query = users.where('birth_year', '>', 1800)

            # Multiple conditions (chained)
            query = (users
                     .where('birth_year', '>', 1800)
                     .where('country', '==', 'England'))
        """
        # Use the keyword FieldFilter form to avoid the positional-args
        # deprecation warning in newer google-cloud-firestore releases.
        filter_obj = FieldFilter(field, op, value)
        new_query = self._query.where(filter=filter_obj)
        return FireQuery(new_query, self._parent_collection, self._projection)

    def order_by(self, field: str, direction: str = 'ASCENDING') -> 'FireQuery':
        """
        Add an ordering clause to the query.

        Creates a new FireQuery with ordering by the specified field.

        Args:
            field: The field path to order by.
            direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'
                      (case-insensitive). Default is 'ASCENDING'.

        Returns:
            A new FireQuery instance with the ordering applied.

        Raises:
            ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.

        Example:
            # Ascending order
            query = users.order_by('birth_year')

            # Descending order
            query = users.order_by('birth_year', direction='DESCENDING')

            # Multiple orderings (chained)
            query = (users
                     .order_by('country')
                     .order_by('birth_year', direction='DESCENDING'))
        """
        # Normalize once, validate before the (lazy) native import.
        direction_key = direction.upper()
        if direction_key not in ('ASCENDING', 'DESCENDING'):
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        # Lazy import keeps module import light; the constant name matches
        # the normalized direction string (Query.ASCENDING / Query.DESCENDING).
        from google.cloud.firestore_v1 import Query as QueryClass
        direction_const = getattr(QueryClass, direction_key)

        new_query = self._query.order_by(field, direction=direction_const)
        return FireQuery(new_query, self._parent_collection, self._projection)

    def limit(self, count: int) -> 'FireQuery':
        """
        Limit the number of results returned.

        Creates a new FireQuery that will return at most `count` results.

        Args:
            count: Maximum number of documents to return. Must be positive.

        Returns:
            A new FireQuery instance with the limit applied.

        Raises:
            ValueError: If count is not positive.

        Example:
            # Get top 10 results
            query = users.order_by('score', direction='DESCENDING').limit(10)

            # Get first 5 matching documents
            query = users.where('active', '==', True).limit(5)
        """
        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")

        new_query = self._query.limit(count)
        return FireQuery(new_query, self._parent_collection, self._projection)

    def start_at(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        Start query results at a cursor position (inclusive).

        Creates a new FireQuery that starts at the specified cursor. The cursor
        can be a document snapshot or a dictionary of field values matching the
        order_by fields.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot from a previous query
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the start cursor applied.

        Example:
            # Using field values (requires matching order_by)
            query = users.order_by('age').start_at({'age': 25})

            # Pagination: get first page, then start at last document
            page1 = users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = users.order_by('age').start_at({'age': last_age}).limit(10).get()

            # Using a document snapshot
            last_doc_ref = page1[-1]._doc_ref
            last_snapshot = last_doc_ref.get()
            page2 = users.order_by('age').start_at(last_snapshot).limit(10).get()
        """
        new_query = self._query.start_at(*document_fields_or_snapshot)
        return FireQuery(new_query, self._parent_collection, self._projection)

    def start_after(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        Start query results after a cursor position (exclusive).

        Creates a new FireQuery that starts after the specified cursor. The cursor
        document itself is excluded from results. This is typically used for
        pagination to avoid duplicating the last document from the previous page.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot from a previous query
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the start-after cursor applied.

        Example:
            # Pagination: exclude the last document from previous page
            page1 = users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = users.order_by('age').start_after({'age': last_age}).limit(10).get()

            # Using a document snapshot (common pattern)
            last_doc_ref = page1[-1]._doc_ref
            last_snapshot = last_doc_ref.get()
            page2 = users.order_by('age').start_after(last_snapshot).limit(10).get()
        """
        new_query = self._query.start_after(*document_fields_or_snapshot)
        return FireQuery(new_query, self._parent_collection, self._projection)

    def end_at(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        End query results at a cursor position (inclusive).

        Creates a new FireQuery that ends at the specified cursor. The cursor
        document is included in the results.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the end cursor applied.

        Example:
            # Get all users up to and including age 50
            query = users.order_by('age').end_at({'age': 50})

            # Using a specific document as endpoint
            target_doc_ref = users.doc('user123')._doc_ref
            target_snapshot = target_doc_ref.get()
            query = users.order_by('age').end_at(target_snapshot)
        """
        new_query = self._query.end_at(*document_fields_or_snapshot)
        return FireQuery(new_query, self._parent_collection, self._projection)

    def end_before(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        End query results before a cursor position (exclusive).

        Creates a new FireQuery that ends before the specified cursor. The cursor
        document itself is excluded from results.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the end-before cursor applied.

        Example:
            # Get all users before age 50 (exclude 50)
            query = users.order_by('age').end_before({'age': 50})

            # Using a specific document as exclusive endpoint
            target_doc_ref = users.doc('user123')._doc_ref
            target_snapshot = target_doc_ref.get()
            query = users.order_by('age').end_before(target_snapshot)
        """
        new_query = self._query.end_before(*document_fields_or_snapshot)
        return FireQuery(new_query, self._parent_collection, self._projection)

    def select(self, *field_paths: str) -> 'FireQuery':
        """
        Select specific fields to return (projection).

        Creates a new FireQuery that only returns the specified fields in the
        query results. When using projections, query results will be returned
        as vanilla dictionaries instead of FireObject instances. Any
        DocumentReferences in the returned dictionaries will be automatically
        converted to FireObject instances in ATTACHED state.

        Args:
            *field_paths: One or more field paths to select. Field paths can
                         include nested fields using dot notation (e.g., 'address.city').

        Returns:
            A new FireQuery instance with the projection applied.

        Raises:
            ValueError: If no field paths are provided.

        Example:
            # Select a single field
            query = users.select('name')
            results = query.get()
            # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

            # Select multiple fields
            query = users.select('name', 'email', 'birth_year')

            # Select with filtering and ordering
            query = (users
                     .where('birth_year', '>', 1990)
                     .select('name', 'birth_year')
                     .order_by('birth_year')
                     .limit(10))

            # DocumentReferences are auto-converted to FireObjects
            query = posts.select('title', 'author')  # author is a DocumentReference
            results = query.get()
            # results[0]['author'] is a FireObject, not a DocumentReference

        Note:
            - Projection queries return dictionaries, not FireObject instances
            - Only the selected fields will be present in the returned dictionaries
            - DocumentReferences are automatically hydrated to FireObject instances
            - Projected results are more bandwidth-efficient for large documents
        """
        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # Record the projection so get()/stream() know to emit dictionaries.
        new_query = self._query.select(list(field_paths))
        return FireQuery(new_query, self._parent_collection, projection=field_paths)

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'FireQuery':
        """
        Find the nearest neighbors based on vector similarity.

        Performs a vector similarity search on top of the current query filters.
        This allows you to combine pre-filtering with vector search (requires
        a composite index).

        Args:
            vector_field: Name of the field containing vector embeddings.
            query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
            distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
                DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
            limit: Maximum number of nearest neighbors to return (max 1000).
            distance_result_field: Optional field name to store the calculated distance
                in the query results.

        Returns:
            A new FireQuery instance with the vector search applied.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            # Find nearest neighbors with pre-filtering
            query = (collection
                     .where('category', '==', 'tech')
                     .find_nearest(
                         vector_field="embedding",
                         query_vector=Vector([0.1, 0.2, 0.3]),
                         distance_measure=DistanceMeasure.COSINE,
                         limit=5
                     ))
            for doc in query.get():
                print(f"{doc.title}: {doc.category}")

        Note:
            - Requires a composite index when combining with where() clauses
            - Maximum limit is 1000 documents
            - Does not work with Firestore emulator (production only)
        """
        # Delegate to the native vector search; keep the current projection.
        new_query = self._query.find_nearest(
            vector_field=vector_field,
            query_vector=query_vector,
            distance_measure=distance_measure,
            limit=limit,
            distance_result_field=distance_result_field,
        )
        return FireQuery(new_query, self._parent_collection, self._projection)

    # =========================================================================
    # Aggregation Methods
    # =========================================================================

    def _extract_aggregation_value(
        self, result: Any, default: Union[int, float]
    ) -> Union[int, float]:
        """
        Extract the single value from an executed aggregation query result.

        Aggregation results arrive as an iterable of result batches, each a
        sequence of AggregationResult objects. This helper pulls out the first
        (and only) value, coercing a null aggregate to `default` so callers
        always receive a number as documented.

        Args:
            result: Return value of AggregationQuery.get().
            default: Value to return when no documents matched or the
                     aggregate value is None.

        Returns:
            The numeric aggregation value, or `default`.
        """
        if result:
            for agg_result in result:
                value = agg_result[0].value
                return value if value is not None else default
        return default

    def count(self) -> int:
        """
        Count documents matching the query.

        Executes an aggregation query to count the number of documents that
        match the current query filters without fetching the actual documents.
        This is more efficient than fetching all documents and counting them.

        Returns:
            Integer count of matching documents. Returns 0 if no documents match.

        Example:
            # Count all users
            total_users = users.count()

            # Count with filters
            active_users = users.where('active', '==', True).count()

            # Count with complex query
            count = (users
                     .where('age', '>', 25)
                     .where('country', '==', 'USA')
                     .count())

        Note:
            This uses Firestore's native aggregation API, which is more efficient
            than fetching documents. However, it still counts as one document read
            per 1000 documents in the collection.
        """
        agg_query = self._query.count(alias='count')
        return self._extract_aggregation_value(agg_query.get(), 0)

    def sum(self, field: str) -> Union[int, float]:
        """
        Sum a numeric field across all matching documents.

        Executes an aggregation query to sum the values of a specific field
        without fetching the actual documents. The field must contain numeric
        values (int or float).

        Args:
            field: Name of the numeric field to sum.

        Returns:
            Sum of the field values across all matching documents.
            Returns 0 if no documents match or if all values are null.

        Raises:
            ValueError: If field is None or empty.

        Example:
            # Sum all salaries
            total_salary = employees.sum('salary')

            # Sum with filters
            engineering_salary = (employees
                                  .where('department', '==', 'Engineering')
                                  .sum('salary'))

            # Sum revenue from active products
            total_revenue = (products
                            .where('active', '==', True)
                            .sum('revenue'))

        Note:
            - Null values are ignored in the sum
            - Non-numeric values will cause an error
            - This is more efficient than fetching all documents
        """
        if not field:
            raise ValueError("sum() requires a field name")

        agg_query = self._query.sum(field, alias='sum')
        # Coerce a null aggregate to 0 so the documented return contract holds.
        return self._extract_aggregation_value(agg_query.get(), 0)

    def avg(self, field: str) -> float:
        """
        Average a numeric field across all matching documents.

        Executes an aggregation query to calculate the arithmetic mean of a
        specific field without fetching the actual documents. The field must
        contain numeric values (int or float).

        Args:
            field: Name of the numeric field to average.

        Returns:
            Average of the field values across all matching documents.
            Returns 0.0 if no documents match or if all values are null.

        Raises:
            ValueError: If field is None or empty.

        Example:
            # Average age of all users
            avg_age = users.avg('age')

            # Average with filters
            avg_salary = (employees
                         .where('department', '==', 'Engineering')
                         .avg('salary'))

            # Average rating for active products
            avg_rating = (products
                         .where('active', '==', True)
                         .avg('rating'))

        Note:
            - Null values are ignored in the average calculation
            - Non-numeric values will cause an error
            - This is more efficient than fetching all documents
        """
        if not field:
            raise ValueError("avg() requires a field name")

        agg_query = self._query.avg(field, alias='avg')
        return self._extract_aggregation_value(agg_query.get(), 0.0)

    def _apply_aggregation(self, target: Any, alias: str, agg_type: Any) -> Any:
        """
        Attach one named aggregation to a query or aggregation query.

        Args:
            target: Either the base Query (for the first aggregation) or the
                    AggregationQuery built so far (for subsequent ones); both
                    expose the same count/sum/avg builder methods.
            alias: Result alias for this aggregation.
            agg_type: A Count(), Sum(field), or Avg(field) marker from
                      fire_prox.aggregation.

        Returns:
            The AggregationQuery with this aggregation added.

        Raises:
            ValueError: If agg_type is not a recognized aggregation marker,
                        or a Sum/Avg marker is missing its field name.
        """
        from .aggregation import Avg, Count, Sum

        if isinstance(agg_type, Count):
            return target.count(alias=alias)
        if isinstance(agg_type, Sum):
            if not agg_type.field:
                raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
            return target.sum(agg_type.field, alias=alias)
        if isinstance(agg_type, Avg):
            if not agg_type.field:
                raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
            return target.avg(agg_type.field, alias=alias)
        raise ValueError(
            f"Invalid aggregation type for '{alias}': {type(agg_type).__name__}. "
            f"Use Count(), Sum(field), or Avg(field)"
        )

    def aggregate(self, **aggregations) -> Dict[str, Any]:
        """
        Perform multiple aggregations in a single query.

        Executes an aggregation query with multiple aggregation operations
        (count, sum, average) without fetching the actual documents. This is
        more efficient than running multiple separate aggregation queries.

        Args:
            **aggregations: Named aggregations using Count(), Sum(field), or
                          Avg(field) from fire_prox.aggregation module.

        Returns:
            Dictionary mapping aggregation names to their results.

        Raises:
            ValueError: If no aggregations are provided or if invalid
                       aggregation types are used.

        Example:
            from fire_prox.aggregation import Count, Sum, Avg

            # Multiple aggregations in one query
            stats = employees.aggregate(
                total_count=Count(),
                total_salary=Sum('salary'),
                avg_salary=Avg('salary'),
                avg_age=Avg('age')
            )
            # Returns: {
            #     'total_count': 150,
            #     'total_salary': 15000000,
            #     'avg_salary': 100000.0,
            #     'avg_age': 35.2
            # }

            # With filters
            eng_stats = (employees
                        .where('department', '==', 'Engineering')
                        .aggregate(
                            count=Count(),
                            total_salary=Sum('salary')
                        ))
            # Returns: {'count': 50, 'total_salary': 5000000}

        Note:
            - Much more efficient than multiple separate aggregation queries
            - All aggregations execute in a single round-trip to Firestore
            - Null values are ignored in sum and average calculations
        """
        if not aggregations:
            raise ValueError("aggregate() requires at least one aggregation")

        # The first aggregation converts the Query into an AggregationQuery;
        # subsequent ones chain onto it. _apply_aggregation handles both since
        # the builder methods share names.
        agg_query = None
        for alias, agg_type in aggregations.items():
            target = self._query if agg_query is None else agg_query
            agg_query = self._apply_aggregation(target, alias, agg_type)

        # Execute and collect all aliased results; None becomes 0 for
        # consistency with count()/sum()/avg().
        results_dict: Dict[str, Any] = {}
        result = agg_query.get()
        for agg_result in result or []:
            for agg in agg_result:
                value = agg.value
                results_dict[agg.alias] = value if value is not None else 0

        return results_dict

    # =========================================================================
    # Helper Methods
    # =========================================================================

    def _convert_projection_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Convert DocumentReferences in projection data to FireObjects.

        Recursively processes a dictionary to convert any DocumentReference
        instances to FireObject instances in ATTACHED state. This allows
        users to work with references naturally using the FireProx API.

        Args:
            data: Dictionary containing projection data from Firestore.

        Returns:
            Dictionary with DocumentReferences converted to FireObjects.
        """
        from .state import State

        result = {}
        for key, value in data.items():
            if isinstance(value, DocumentReference):
                # Convert DocumentReference to FireObject in ATTACHED state
                result[key] = FireObject(
                    doc_ref=value,
                    initial_state=State.ATTACHED,
                    parent_collection=self._parent_collection
                )
            elif isinstance(value, list):
                # Recursively process lists
                result[key] = [
                    FireObject(
                        doc_ref=item,
                        initial_state=State.ATTACHED,
                        parent_collection=self._parent_collection
                    ) if isinstance(item, DocumentReference)
                    else self._convert_projection_data(item) if isinstance(item, dict)
                    else item
                    for item in value
                ]
            elif isinstance(value, dict):
                # Recursively process nested dictionaries
                result[key] = self._convert_projection_data(value)
            else:
                # Keep primitive values as-is
                result[key] = value
        return result

    # =========================================================================
    # Query Execution Methods
    # =========================================================================

    def get(self) -> Union[List[FireObject], List[Dict[str, Any]]]:
        """
        Execute the query and return results as a list.

        Fetches all matching documents and hydrates them into FireObject
        instances in LOADED state. If a projection is active (via .select()),
        returns vanilla dictionaries instead of FireObject instances.

        Returns:
            - If no projection: List of FireObject instances for all documents
              matching the query.
            - If projection active: List of dictionaries containing only the
              selected fields. DocumentReferences are converted to FireObjects.
            - Empty list if no documents match.

        Example:
            # Get all results as FireObjects
            users = query.get()
            for user in users:
                print(f"{user.name}: {user.birth_year}")

            # Get projected results as dictionaries
            users = query.select('name', 'email').get()
            for user_dict in users:
                print(f"{user_dict['name']}: {user_dict['email']}")

            # Check if results exist
            results = query.get()
            if results:
                print(f"Found {len(results)} users")
            else:
                print("No users found")
        """
        # Execute query
        snapshots = self._query.stream()

        # If projection is active, return vanilla dictionaries
        if self._projection:
            return [
                self._convert_projection_data(snap.to_dict())
                for snap in snapshots
            ]

        # Otherwise, return FireObjects as usual
        return [FireObject.from_snapshot(snap, self._parent_collection) for snap in snapshots]

    def stream(self) -> Union[Iterator[FireObject], Iterator[Dict[str, Any]]]:
        """
        Execute the query and stream results as an iterator.

        Returns a generator that yields FireObject instances one at a time.
        This is more memory-efficient than .get() for large result sets
        as it doesn't load all results into memory at once. If a projection
        is active (via .select()), yields vanilla dictionaries instead.

        Yields:
            - If no projection: FireObject instances in LOADED state for each
              matching document.
            - If projection active: Dictionaries containing only the selected
              fields. DocumentReferences are converted to FireObjects.

        Example:
            # Stream results one at a time as FireObjects
            for user in query.stream():
                print(f"{user.name}: {user.birth_year}")
                # Process each user without loading all users into memory

            # Stream projected results as dictionaries
            for user_dict in query.select('name', 'email').stream():
                print(f"{user_dict['name']}: {user_dict['email']}")

            # Works with any query
            for post in (posts
                        .where('published', '==', True)
                        .order_by('date', direction='DESCENDING')
                        .stream()):
                print(post.title)
        """
        if self._projection:
            # Projection active: yield vanilla dictionaries with references
            # hydrated to FireObjects.
            for snapshot in self._query.stream():
                yield self._convert_projection_data(snapshot.to_dict())
        else:
            # Otherwise, stream FireObjects as usual
            for snapshot in self._query.stream():
                yield FireObject.from_snapshot(snapshot, self._parent_collection)

    # =========================================================================
    # Real-Time Listeners (Sync-only)
    # =========================================================================

    def on_snapshot(self, callback: Any) -> Any:
        """
        Listen for real-time updates to this query.

        This method sets up a real-time listener that fires the callback
        whenever any document matching the query changes. The listener runs
        on a separate thread managed by the Firestore SDK.

        **Important**: This is a sync-only feature. The listener uses the
        underlying synchronous query to run on a background thread. This is
        the standard Firestore pattern for real-time listeners in Python.

        Args:
            callback: Callback function invoked on query changes.
                     Signature: callback(query_snapshot, changes, read_time)
                     - query_snapshot: List of DocumentSnapshot objects matching the query
                     - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED)
                     - read_time: Timestamp of the snapshot

        Returns:
            Watch object with an `.unsubscribe()` method to stop listening.

        Example:
            import threading

            callback_done = threading.Event()

            def on_change(query_snapshot, changes, read_time):
                for change in changes:
                    if change.type.name == 'ADDED':
                        print(f"New: {change.document.id}")
                    elif change.type.name == 'MODIFIED':
                        print(f"Modified: {change.document.id}")
                    elif change.type.name == 'REMOVED':
                        print(f"Removed: {change.document.id}")
                callback_done.set()

            # Listen to active users only
            active_users = users.where('status', '==', 'active')
            watch = active_users.on_snapshot(on_change)

            # Wait for initial snapshot
            callback_done.wait()

            # Later: stop listening
            watch.unsubscribe()

        Note:
            The callback runs on a separate thread. Use threading primitives
            (Event, Lock, Queue) for synchronization with your main thread.
        """
        # Use the native query's on_snapshot method directly
        return self._query.on_snapshot(callback)

    def __repr__(self) -> str:
        """Return string representation of the query."""
        return f"<FireQuery query={self._query}>"

    def __str__(self) -> str:
        """Return human-readable string representation."""
        return f"FireQuery({self._query})"

__init__(native_query, parent_collection=None, projection=None)

Initialize a FireQuery.

Args:

- `native_query`: The underlying native Query object from google-cloud-firestore.
- `parent_collection`: Optional reference to the parent FireCollection.
- `projection`: Optional tuple of field paths to project (select specific fields).

Source code in src/fire_prox/fire_query.py
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
def __init__(
    self,
    native_query: Query,
    parent_collection: Optional[Any] = None,
    projection: Optional[tuple] = None,
):
    """
    Wrap a native google-cloud-firestore Query.

    Args:
        native_query: The underlying native Query object from
            google-cloud-firestore.
        parent_collection: The FireCollection this query originated from,
            if any.
        projection: Tuple of field paths to project (set via .select()).
    """
    # Hold the wrapped query plus the metadata needed later to hydrate
    # FireObjects (parent collection) and dict results (projection).
    self._projection = projection
    self._parent_collection = parent_collection
    self._query = native_query

__repr__()

Return string representation of the query.

Source code in src/fire_prox/fire_query.py
892
893
894
def __repr__(self) -> str:
    """Return the developer-oriented representation of this query."""
    return "<FireQuery query={}>".format(self._query)

__str__()

Return human-readable string representation.

Source code in src/fire_prox/fire_query.py
896
897
898
def __str__(self) -> str:
    """Return a concise human-readable description of this query."""
    return "FireQuery({})".format(self._query)

aggregate(**aggregations)

Perform multiple aggregations in a single query.

Executes an aggregation query with multiple aggregation operations (count, sum, average) without fetching the actual documents. This is more efficient than running multiple separate aggregation queries.

Args: **aggregations: Named aggregations using Count(), Sum(field), or Avg(field) from fire_prox.aggregation module.

Returns: Dictionary mapping aggregation names to their results.

Raises: ValueError: If no aggregations are provided or if invalid aggregation types are used.

Example: from fire_prox.aggregation import Count, Sum, Avg

# Multiple aggregations in one query
stats = employees.aggregate(
    total_count=Count(),
    total_salary=Sum('salary'),
    avg_salary=Avg('salary'),
    avg_age=Avg('age')
)
# Returns: {
#     'total_count': 150,
#     'total_salary': 15000000,
#     'avg_salary': 100000.0,
#     'avg_age': 35.2
# }

# With filters
eng_stats = (employees
            .where('department', '==', 'Engineering')
            .aggregate(
                count=Count(),
                total_salary=Sum('salary')
            ))
# Returns: {'count': 50, 'total_salary': 5000000}

# Financial dashboard
financials = (transactions
             .where('date', '>=', start_date)
             .aggregate(
                 total_transactions=Count(),
                 total_revenue=Sum('amount'),
                 avg_transaction=Avg('amount')
             ))

Note: - Much more efficient than multiple separate aggregation queries - All aggregations execute in a single round-trip to Firestore - Null values are ignored in sum and average calculations

Source code in src/fire_prox/fire_query.py
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
def aggregate(self, **aggregations) -> Dict[str, Any]:
    """
    Perform multiple aggregations in a single query.

    Executes an aggregation query with multiple aggregation operations
    (count, sum, average) without fetching the actual documents. All
    aggregations run in a single round-trip to Firestore, which is more
    efficient than issuing separate aggregation queries.

    Args:
        **aggregations: Named aggregations using Count(), Sum(field), or
            Avg(field) from the fire_prox.aggregation module. The keyword
            name becomes the alias under which the result is returned.

    Returns:
        Dictionary mapping aggregation names to their results. Results
        that Firestore reports as None (no matching documents) are
        normalized to 0.

    Raises:
        ValueError: If no aggregations are provided, if a Sum/Avg is
            missing its field name, or if a value is not a Count, Sum,
            or Avg instance.

    Example:
        from fire_prox.aggregation import Count, Sum, Avg

        stats = employees.aggregate(
            total_count=Count(),
            total_salary=Sum('salary'),
            avg_salary=Avg('salary'),
        )
        # {'total_count': 150, 'total_salary': 15000000, 'avg_salary': 100000.0}

        eng_stats = (employees
                    .where('department', '==', 'Engineering')
                    .aggregate(count=Count(), total_salary=Sum('salary')))

    Note:
        - Null field values are ignored in sum and average calculations.
    """
    if not aggregations:
        raise ValueError("aggregate() requires at least one aggregation")

    from .aggregation import Avg, Count, Sum

    # Both the native Query and the AggregationQuery returned by its
    # count()/sum()/avg() methods expose the same aggregation methods,
    # so every aggregation can be applied uniformly in one pass instead
    # of special-casing the first one.
    agg_query = self._query
    for alias, agg_type in aggregations.items():
        if isinstance(agg_type, Count):
            agg_query = agg_query.count(alias=alias)
        elif isinstance(agg_type, Sum):
            if not agg_type.field:
                raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
            agg_query = agg_query.sum(agg_type.field, alias=alias)
        elif isinstance(agg_type, Avg):
            if not agg_type.field:
                raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
            agg_query = agg_query.avg(agg_type.field, alias=alias)
        else:
            raise ValueError(
                f"Invalid aggregation type for '{alias}': {type(agg_type).__name__}. "
                f"Use Count(), Sum(field), or Avg(field)"
            )

    # Execute once and collect every aliased result.
    result = agg_query.get()
    results_dict: Dict[str, Any] = {}
    if result:
        for agg_result in result:
            for agg in agg_result:
                value = agg.value
                # Convert None to 0 for consistency
                results_dict[agg.alias] = value if value is not None else 0

    return results_dict

avg(field)

Average a numeric field across all matching documents.

Executes an aggregation query to calculate the arithmetic mean of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to average.

Returns: Average of the field values across all matching documents. Returns 0.0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Average age of all users avg_age = users.avg('age') # Returns: 32.5

# Average with filters
avg_salary = (employees
             .where('department', '==', 'Engineering')
             .avg('salary'))
# Returns: 125000.0

# Average rating for active products
avg_rating = (products
             .where('active', '==', True)
             .avg('rating'))
# Returns: 4.2

Note: - Null values are ignored in the average calculation - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/fire_query.py
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
def avg(self, field: str) -> float:
    """
    Compute the arithmetic mean of a numeric field over all matching documents.

    Runs a server-side aggregation query, so the documents themselves are
    never downloaded. The field must hold numeric values (int or float).

    Args:
        field: Name of the numeric field to average.

    Returns:
        The mean of the field's values across matching documents, or 0.0
        when no documents match or every value is null.

    Raises:
        ValueError: If ``field`` is None or empty.

    Example:
        avg_age = users.avg('age')
        # 32.5

        avg_salary = (employees
                     .where('department', '==', 'Engineering')
                     .avg('salary'))
        # 125000.0

    Note:
        - Null field values are skipped by the server.
        - Non-numeric values cause a server-side error.
        - More efficient than fetching all documents and averaging locally.
    """
    if not field:
        raise ValueError("avg() requires a field name")

    # Build and run the server-side aggregation.
    aggregation = self._query.avg(field, alias='avg')
    response = aggregation.get()

    if not response:
        return 0.0
    for row in response:
        value = row[0].value
        return 0.0 if value is None else value
    return 0.0

count()

Count documents matching the query.

Executes an aggregation query to count the number of documents that match the current query filters without fetching the actual documents. This is more efficient than fetching all documents and counting them.

Returns: Integer count of matching documents. Returns 0 if no documents match.

Example: # Count all users total_users = users.count() # Returns: 150

# Count with filters
active_users = users.where('active', '==', True).count()
# Returns: 42

# Count with complex query
count = (users
         .where('age', '>', 25)
         .where('country', '==', 'USA')
         .count())
# Returns: 37

Note: This uses Firestore's native aggregation API, which is more efficient than fetching documents. However, it is still billed at one document read per batch of up to 1,000 index entries matched.

Source code in src/fire_prox/fire_query.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
def count(self) -> int:
    """
    Count the documents matching the query.

    Uses a server-side aggregation, so no document payloads are fetched.
    This is more efficient than streaming the documents and counting them
    client-side.

    Returns:
        Number of matching documents; 0 when nothing matches.

    Example:
        total_users = users.count()
        active_users = users.where('active', '==', True).count()
        usa_adults = (users
                      .where('age', '>', 25)
                      .where('country', '==', 'USA')
                      .count())

    Note:
        Firestore's native aggregation API bills this at one document
        read per batch of up to 1,000 index entries matched, so it is
        still far cheaper than reading every document.
    """
    # Build and run the server-side aggregation.
    aggregation = self._query.count(alias='count')
    response = aggregation.get()

    if not response:
        return 0
    for row in response:
        # The single aggregation result carries the count value.
        return row[0].value
    return 0

end_at(*document_fields_or_snapshot)

End query results at a cursor position (inclusive).

Creates a new FireQuery that ends at the specified cursor. The cursor document is included in the results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the end cursor applied.

Example: # Get all users up to and including age 50 query = users.order_by('age').end_at({'age': 50})

# Using a specific document as endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = target_doc_ref.get()
query = users.order_by('age').end_at(target_snapshot)
Source code in src/fire_prox/fire_query.py
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
def end_at(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    Set an inclusive end cursor for the query.

    The document identified by the cursor is included in the results.

    Args:
        *document_fields_or_snapshot: A dict of field values
            ({'field': value}), a DocumentSnapshot, or positional field
            values matching the order_by clause order.

    Returns:
        A new FireQuery ending at the given cursor; this query is unchanged.

    Example:
        # All users up to and including age 50
        query = users.order_by('age').end_at({'age': 50})

        # Use a specific document as the endpoint
        target_doc_ref = users.doc('user123')._doc_ref
        target_snapshot = target_doc_ref.get()
        query = users.order_by('age').end_at(target_snapshot)
    """
    bounded = self._query.end_at(*document_fields_or_snapshot)
    return FireQuery(bounded, self._parent_collection, self._projection)

end_before(*document_fields_or_snapshot)

End query results before a cursor position (exclusive).

Creates a new FireQuery that ends before the specified cursor. The cursor document itself is excluded from results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the end-before cursor applied.

Example: # Get all users before age 50 (exclude 50) query = users.order_by('age').end_before({'age': 50})

# Using a specific document as exclusive endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = target_doc_ref.get()
query = users.order_by('age').end_before(target_snapshot)
Source code in src/fire_prox/fire_query.py
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
def end_before(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    Set an exclusive end cursor for the query.

    The document identified by the cursor is excluded from the results.

    Args:
        *document_fields_or_snapshot: A dict of field values
            ({'field': value}), a DocumentSnapshot, or positional field
            values matching the order_by clause order.

    Returns:
        A new FireQuery ending before the cursor; this query is unchanged.

    Example:
        # All users before age 50 (50 itself excluded)
        query = users.order_by('age').end_before({'age': 50})

        # Use a specific document as the exclusive endpoint
        target_doc_ref = users.doc('user123')._doc_ref
        target_snapshot = target_doc_ref.get()
        query = users.order_by('age').end_before(target_snapshot)
    """
    bounded = self._query.end_before(*document_fields_or_snapshot)
    return FireQuery(bounded, self._parent_collection, self._projection)

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search on top of the current query filters. This allows you to combine pre-filtering with vector search (requires a composite index).

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: A new FireQuery instance with the vector search applied.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

# Find nearest neighbors with pre-filtering
query = (collection
         .where('category', '==', 'tech')
         .find_nearest(
             vector_field="embedding",
             query_vector=Vector([0.1, 0.2, 0.3]),
             distance_measure=DistanceMeasure.COSINE,
             limit=5
         ))
for doc in query.get():
    print(f"{doc.title}: {doc.category}")

Note: - Requires a composite index when combining with where() clauses - Maximum limit is 1000 documents - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/fire_query.py
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'FireQuery':
    """
    Run a vector similarity (nearest-neighbor) search on this query.

    The search is layered on top of any filters already applied, so
    pre-filtering can be combined with vector search (that combination
    requires a composite index).

    Args:
        vector_field: Field holding the vector embeddings.
        query_vector: Vector to compare against
            (google.cloud.firestore_v1.vector.Vector).
        distance_measure: DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE,
            or DistanceMeasure.DOT_PRODUCT.
        limit: Maximum number of nearest neighbors to return (max 1000).
        distance_result_field: Optional field name under which the computed
            distance is surfaced in the results.

    Returns:
        A new FireQuery wrapping the vector search; this query is unchanged.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        query = (collection
                 .where('category', '==', 'tech')
                 .find_nearest(
                     vector_field="embedding",
                     query_vector=Vector([0.1, 0.2, 0.3]),
                     distance_measure=DistanceMeasure.COSINE,
                     limit=5
                 ))
        for doc in query.get():
            print(f"{doc.title}: {doc.category}")

    Note:
        - A composite index is required when combined with where() clauses.
        - The limit is capped at 1000 documents.
        - Not supported by the Firestore emulator (production only).
    """
    # Delegate to the native vector-search API.
    vector_query = self._query.find_nearest(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return FireQuery(vector_query, self._parent_collection, self._projection)

get()

Execute the query and return results as a list.

Fetches all matching documents and hydrates them into FireObject instances in LOADED state. If a projection is active (via .select()), returns vanilla dictionaries instead of FireObject instances.

Returns: - If no projection: List of FireObject instances for all documents matching the query. - If projection active: List of dictionaries containing only the selected fields. DocumentReferences are converted to FireObjects. - Empty list if no documents match.

Example: # Get all results as FireObjects users = query.get() for user in users: print(f"{user.name}: {user.birth_year}")

# Get projected results as dictionaries
users = query.select('name', 'email').get()
for user_dict in users:
    print(f"{user_dict['name']}: {user_dict['email']}")

# Check if results exist
results = query.get()
if results:
    print(f"Found {len(results)} users")
else:
    print("No users found")
Source code in src/fire_prox/fire_query.py
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
def get(self) -> Union[List[FireObject], List[Dict[str, Any]]]:
    """
    Execute the query and return results as a list.

    Fetches all matching documents and hydrates them into FireObject
    instances in LOADED state. If a projection is active (via .select()),
    returns vanilla dictionaries instead of FireObject instances.

    Returns:
        - If no projection: List of FireObject instances for all documents
          matching the query.
        - If projection active: List of dictionaries containing only the
          selected fields. DocumentReferences are converted to FireObjects.
        - Empty list if no documents match.

    Example:
        # Get all results as FireObjects
        users = query.get()
        for user in users:
            print(f"{user.name}: {user.birth_year}")

        # Get projected results as dictionaries
        users = query.select('name', 'email').get()
        for user_dict in users:
            print(f"{user_dict['name']}: {user_dict['email']}")

        # Check if results exist
        results = query.get()
        if results:
            print(f"Found {len(results)} users")
        else:
            print("No users found")
    """
    # Execute query
    snapshots = self._query.stream()

    # If projection is active, return vanilla dictionaries
    if self._projection:
        results = []
        for snap in snapshots:
            data = snap.to_dict()
            # Convert DocumentReferences to FireObjects
            converted_data = self._convert_projection_data(data)
            results.append(converted_data)
        return results

    # Otherwise, return FireObjects as usual
    return [FireObject.from_snapshot(snap, self._parent_collection) for snap in snapshots]

limit(count)

Limit the number of results returned.

Creates a new FireQuery that will return at most count results.

Args: count: Maximum number of documents to return. Must be positive.

Returns: A new FireQuery instance with the limit applied.

Raises: ValueError: If count is not positive.

Example: # Get top 10 results query = users.order_by('score', direction='DESCENDING').limit(10)

# Get first 5 matching documents
query = users.where('active', '==', True).limit(5)
Source code in src/fire_prox/fire_query.py
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
def limit(self, count: int) -> 'FireQuery':
    """
    Restrict the query to return at most ``count`` documents.

    Args:
        count: Maximum number of documents to return. Must be positive.

    Returns:
        A new FireQuery with the limit applied; this query is unchanged.

    Raises:
        ValueError: If ``count`` is zero or negative.

    Example:
        # Top 10 results
        query = users.order_by('score', direction='DESCENDING').limit(10)

        # First 5 matching documents
        query = users.where('active', '==', True).limit(5)
    """
    # Guard against nonsensical limits before touching the native query.
    if count <= 0:
        raise ValueError(f"Limit count must be positive, got {count}")

    return FireQuery(self._query.limit(count), self._parent_collection, self._projection)

on_snapshot(callback)

Listen for real-time updates to this query.

This method sets up a real-time listener that fires the callback whenever any document matching the query changes. The listener runs on a separate thread managed by the Firestore SDK.

Important: This is a sync-only feature. The listener uses the underlying synchronous query to run on a background thread. This is the standard Firestore pattern for real-time listeners in Python.

Args: callback: Callback function invoked on query changes. Signature: callback(query_snapshot, changes, read_time) - query_snapshot: List of DocumentSnapshot objects matching the query - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED) - read_time: Timestamp of the snapshot

Returns: Watch object with an .unsubscribe() method to stop listening.

Example: import threading

callback_done = threading.Event()

def on_change(query_snapshot, changes, read_time):
    for change in changes:
        if change.type.name == 'ADDED':
            print(f"New: {change.document.id}")
        elif change.type.name == 'MODIFIED':
            print(f"Modified: {change.document.id}")
        elif change.type.name == 'REMOVED':
            print(f"Removed: {change.document.id}")
    callback_done.set()

# Listen to active users only
active_users = users.where('status', '==', 'active')
watch = active_users.on_snapshot(on_change)

# Wait for initial snapshot
callback_done.wait()

# Later: stop listening
watch.unsubscribe()

Note: The callback runs on a separate thread. Use threading primitives (Event, Lock, Queue) for synchronization with your main thread.

Source code in src/fire_prox/fire_query.py
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
def on_snapshot(self, callback: Any) -> Any:
    """
    Register a real-time listener for this query.

    The Firestore SDK invokes ``callback`` on a background thread every
    time the set of documents matching the query changes.

    **Important**: This is a sync-only feature — the listener is driven
    by the underlying synchronous query on a background thread, which is
    the standard Firestore pattern for real-time listeners in Python.

    Args:
        callback: Function with signature
            ``callback(query_snapshot, changes, read_time)``:
            - query_snapshot: list of DocumentSnapshot objects matching the query
            - changes: list of DocumentChange objects (ADDED, MODIFIED, REMOVED)
            - read_time: timestamp of the snapshot

    Returns:
        A Watch object; call its ``unsubscribe()`` method to stop listening.

    Example:
        import threading

        callback_done = threading.Event()

        def on_change(query_snapshot, changes, read_time):
            for change in changes:
                if change.type.name == 'ADDED':
                    print(f"New: {change.document.id}")
                elif change.type.name == 'MODIFIED':
                    print(f"Modified: {change.document.id}")
                elif change.type.name == 'REMOVED':
                    print(f"Removed: {change.document.id}")
            callback_done.set()

        active_users = users.where('status', '==', 'active')
        watch = active_users.on_snapshot(on_change)

        callback_done.wait()      # wait for initial snapshot
        watch.unsubscribe()       # later: stop listening

    Note:
        Because the callback runs off the main thread, use threading
        primitives (Event, Lock, Queue) to synchronize with your code.
    """
    # Delegate straight to the native query's listener machinery.
    return self._query.on_snapshot(callback)

order_by(field, direction='ASCENDING')

Add an ordering clause to the query.

Creates a new FireQuery with ordering by the specified field.

Args: field: The field path to order by. direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.

Returns: A new FireQuery instance with the ordering applied.

Example: # Ascending order query = users.order_by('birth_year')

# Descending order
query = users.order_by('birth_year', direction='DESCENDING')

# Multiple orderings (chained)
query = (users
         .order_by('country')
         .order_by('birth_year', direction='DESCENDING'))
Source code in src/fire_prox/fire_query.py
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
def order_by(self, field: str, direction: str = 'ASCENDING') -> 'FireQuery':
    """
    Add an ordering clause to the query.

    Creates a new FireQuery with ordering by the specified field. Follows
    the immutable pattern: the current query is untouched and a new
    instance is returned, so calls can be chained.

    Args:
        field: The field path to order by.
        direction: Sort direction, 'ASCENDING' or 'DESCENDING'
                  (case-insensitive). Default is 'ASCENDING'.

    Returns:
        A new FireQuery instance with the ordering applied.

    Raises:
        ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.

    Example:
        # Ascending order
        query = users.order_by('birth_year')

        # Descending order
        query = users.order_by('birth_year', direction='DESCENDING')

        # Multiple orderings (chained)
        query = (users
                 .order_by('country')
                 .order_by('birth_year', direction='DESCENDING'))
    """
    # Validate first so bad input fails fast, before any work is done.
    normalized = direction.upper()
    if normalized not in ('ASCENDING', 'DESCENDING'):
        raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

    # Local import keeps google.cloud off the module import path; the
    # original duplicated this import in both branches.
    from google.cloud.firestore_v1 import Query as QueryClass
    direction_const = (QueryClass.ASCENDING if normalized == 'ASCENDING'
                       else QueryClass.DESCENDING)

    new_query = self._query.order_by(field, direction=direction_const)
    return FireQuery(new_query, self._parent_collection, self._projection)

select(*field_paths)

Select specific fields to return (projection).

Creates a new FireQuery that only returns the specified fields in the query results. When using projections, query results will be returned as vanilla dictionaries instead of FireObject instances. Any DocumentReferences in the returned dictionaries will be automatically converted to FireObject instances in ATTACHED state.

Args: *field_paths: One or more field paths to select. Field paths can include nested fields using dot notation (e.g., 'address.city').

Returns: A new FireQuery instance with the projection applied.

Raises: ValueError: If no field paths are provided.

Example: # Select a single field query = users.select('name') results = query.get() # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

# Select multiple fields
query = users.select('name', 'email', 'birth_year')
results = query.get()
# Returns: [{'name': 'Alice', 'email': 'alice@example.com', 'birth_year': 1990}, ...]

# Select with filtering and ordering
query = (users
         .where('birth_year', '>', 1990)
         .select('name', 'birth_year')
         .order_by('birth_year')
         .limit(10))

# DocumentReferences are auto-converted to FireObjects
query = posts.select('title', 'author')  # author is a DocumentReference
results = query.get()
# results[0]['author'] is a FireObject, not a DocumentReference
print(results[0]['author'].name)  # Can access fields after fetch()

Note: - Projection queries return dictionaries, not FireObject instances - Only the selected fields will be present in the returned dictionaries - DocumentReferences are automatically hydrated to FireObject instances - Projected results are more bandwidth-efficient for large documents

Source code in src/fire_prox/fire_query.py
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
def select(self, *field_paths: str) -> 'FireQuery':
    """
    Restrict the query to return only the given fields (projection).

    Builds a new FireQuery whose results contain just the requested
    fields. Projected results are plain dictionaries rather than
    FireObject instances; any DocumentReference values inside them are
    automatically wrapped as FireObjects in ATTACHED state.

    Args:
        *field_paths: One or more field paths to select. Nested fields
                     may be addressed with dot notation (e.g. 'address.city').

    Returns:
        A new FireQuery instance with the projection applied.

    Raises:
        ValueError: If no field paths are provided.

    Example:
        # Select a single field
        query = users.select('name')
        results = query.get()
        # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

        # Select multiple fields, combined with filters and ordering
        query = (users
                 .where('birth_year', '>', 1990)
                 .select('name', 'birth_year')
                 .order_by('birth_year')
                 .limit(10))

        # DocumentReferences are auto-converted to FireObjects
        query = posts.select('title', 'author')  # author is a DocumentReference
        results = query.get()
        print(results[0]['author'].name)  # Can access fields after fetch()

    Note:
        - Projection queries yield dictionaries, not FireObject instances
        - Only the selected fields appear in the returned dictionaries
        - Projections save bandwidth when documents are large
    """
    if not field_paths:
        raise ValueError("select() requires at least one field path")

    # Apply the projection to the native query; remember the selected
    # paths so result handling knows to emit dictionaries.
    projected = self._query.select(list(field_paths))
    return FireQuery(projected, self._parent_collection, projection=field_paths)

start_after(*document_fields_or_snapshot)

Start query results after a cursor position (exclusive).

Creates a new FireQuery that starts after the specified cursor. The cursor document itself is excluded from results. This is typically used for pagination to avoid duplicating the last document from the previous page.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the start-after cursor applied.

Example: # Pagination: exclude the last document from previous page page1 = users.order_by('age').limit(10).get() last_age = page1[-1].age page2 = users.order_by('age').start_after({'age': last_age}).limit(10).get()

# Using a document snapshot (common pattern)
last_doc_ref = page1[-1]._doc_ref
last_snapshot = last_doc_ref.get()
page2 = users.order_by('age').start_after(last_snapshot).limit(10).get()
Source code in src/fire_prox/fire_query.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
def start_after(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    Begin results strictly after a cursor position (exclusive).

    Returns a new FireQuery whose results start just past the given
    cursor; the cursor document itself is omitted. The usual use case is
    pagination, where the last document of the previous page must not be
    repeated on the next page.

    Args:
        *document_fields_or_snapshot: Either:
            - A dictionary of field values: {'field': value}
            - A DocumentSnapshot from a previous query
            - Direct field values matching order_by clause order

    Returns:
        A new FireQuery instance with the start-after cursor applied.

    Example:
        # Pagination: exclude the last document from previous page
        page1 = users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = users.order_by('age').start_after({'age': last_age}).limit(10).get()

        # Using a document snapshot (common pattern)
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = last_doc_ref.get()
        page2 = users.order_by('age').start_after(last_snapshot).limit(10).get()
    """
    # Delegate cursor handling to the native query; wrap the result.
    cursor_query = self._query.start_after(*document_fields_or_snapshot)
    return FireQuery(cursor_query, self._parent_collection, self._projection)

start_at(*document_fields_or_snapshot)

Start query results at a cursor position (inclusive).

Creates a new FireQuery that starts at the specified cursor. The cursor can be a document snapshot or a dictionary of field values matching the order_by fields.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the start cursor applied.

Example: # Using field values (requires matching order_by) query = users.order_by('age').start_at({'age': 25})

# Pagination: get first page, then start at last document
page1 = users.order_by('age').limit(10).get()
last_age = page1[-1].age
page2 = users.order_by('age').start_at({'age': last_age}).limit(10).get()

# Using a document snapshot
last_doc_ref = page1[-1]._doc_ref
last_snapshot = last_doc_ref.get()
page2 = users.order_by('age').start_at(last_snapshot).limit(10).get()
Source code in src/fire_prox/fire_query.py
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
def start_at(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    Begin results at a cursor position (inclusive).

    Returns a new FireQuery whose results start at the given cursor.
    The cursor may be a document snapshot or a mapping of field values
    corresponding to the query's order_by fields.

    Args:
        *document_fields_or_snapshot: Either:
            - A dictionary of field values: {'field': value}
            - A DocumentSnapshot from a previous query
            - Direct field values matching order_by clause order

    Returns:
        A new FireQuery instance with the start cursor applied.

    Example:
        # Using field values (requires matching order_by)
        query = users.order_by('age').start_at({'age': 25})

        # Pagination: get first page, then start at last document
        page1 = users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = users.order_by('age').start_at({'age': last_age}).limit(10).get()

        # Using a document snapshot
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = last_doc_ref.get()
        page2 = users.order_by('age').start_at(last_snapshot).limit(10).get()
    """
    # Delegate cursor handling to the native query; wrap the result.
    cursor_query = self._query.start_at(*document_fields_or_snapshot)
    return FireQuery(cursor_query, self._parent_collection, self._projection)

stream()

Execute the query and stream results as an iterator.

Returns a generator that yields FireObject instances one at a time. This is more memory-efficient than .get() for large result sets as it doesn't load all results into memory at once. If a projection is active (via .select()), yields vanilla dictionaries instead.

Yields: - If no projection: FireObject instances in LOADED state for each matching document. - If projection active: Dictionaries containing only the selected fields. DocumentReferences are converted to FireObjects.

Example: # Stream results one at a time as FireObjects for user in query.stream(): print(f"{user.name}: {user.birth_year}") # Process each user without loading all users into memory

# Stream projected results as dictionaries
for user_dict in query.select('name', 'email').stream():
    print(f"{user_dict['name']}: {user_dict['email']}")

# Works with any query
for post in (posts
            .where('published', '==', True)
            .order_by('date', direction='DESCENDING')
            .stream()):
    print(post.title)
Source code in src/fire_prox/fire_query.py
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
def stream(self) -> Union[Iterator[FireObject], Iterator[Dict[str, Any]]]:
    """
    Execute the query lazily, yielding results one at a time.

    A generator alternative to .get(): memory usage stays constant
    because documents are produced as they arrive instead of being
    collected into a list. With an active projection (via .select()),
    plain dictionaries are yielded instead of FireObjects.

    Yields:
        - Without projection: FireObject instances in LOADED state, one
          per matching document.
        - With projection: dictionaries holding only the selected fields,
          with DocumentReferences converted to FireObjects.

    Example:
        # Stream results one at a time as FireObjects
        for user in query.stream():
            print(f"{user.name}: {user.birth_year}")
            # Process each user without loading all users into memory

        # Stream projected results as dictionaries
        for user_dict in query.select('name', 'email').stream():
            print(f"{user_dict['name']}: {user_dict['email']}")

        # Works with any query
        for post in (posts
                    .where('published', '==', True)
                    .order_by('date', direction='DESCENDING')
                    .stream()):
            print(post.title)
    """
    # Both branches consume the same native stream; only the per-snapshot
    # transformation differs. (Generator body runs lazily, so the native
    # stream is not opened until iteration begins.)
    snapshots = self._query.stream()
    if self._projection:
        for snap in snapshots:
            # Projection: emit dicts, hydrating any DocumentReferences.
            yield self._convert_projection_data(snap.to_dict())
    else:
        for snap in snapshots:
            yield FireObject.from_snapshot(snap, self._parent_collection)

sum(field)

Sum a numeric field across all matching documents.

Executes an aggregation query to sum the values of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to sum.

Returns: Sum of the field values across all matching documents. Returns 0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Sum all salaries total_salary = employees.sum('salary') # Returns: 5000000

# Sum with filters
engineering_salary = (employees
                      .where('department', '==', 'Engineering')
                      .sum('salary'))
# Returns: 2500000

# Sum revenue from active products
total_revenue = (products
                .where('active', '==', True)
                .sum('revenue'))
# Returns: 1250000.50

Note: - Null values are ignored in the sum - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/fire_query.py
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
def sum(self, field: str) -> Union[int, float]:
    """
    Sum a numeric field across all matching documents.

    Executes a server-side aggregation query to total the values of a
    field without fetching the matching documents. The field must contain
    numeric values (int or float).

    Args:
        field: Name of the numeric field to sum.

    Returns:
        Sum of the field values across all matching documents.
        Returns 0 if no documents match or if all values are null.

    Raises:
        ValueError: If field is None or empty.

    Example:
        # Sum all salaries
        total_salary = employees.sum('salary')
        # Returns: 5000000

        # Sum with filters
        engineering_salary = (employees
                              .where('department', '==', 'Engineering')
                              .sum('salary'))
        # Returns: 2500000

    Note:
        - Null values are ignored in the sum
        - Non-numeric values will cause an error
        - This is more efficient than fetching all documents
    """
    if not field:
        raise ValueError("sum() requires a field name")

    # Build the aggregation query via the native Query.sum() helper.
    agg_query = self._query.sum(field, alias='sum')

    # The native client returns a list of result rows, each a list of
    # AggregationResult objects. We requested exactly one aggregation, so
    # the value lives at result[0][0]; the original looped but could only
    # ever return the first element anyway.
    result = agg_query.get()
    if result:
        return result[0][0].value
    return 0

where(field, op, value)

Add a filter condition to the query.

Creates a new FireQuery with an additional filter condition. Uses the immutable pattern - returns a new instance rather than modifying the current query.

Args: field: The field path to filter on (e.g., 'name', 'address.city'). op: Comparison operator. Supported operators: '==' (equal), '!=' (not equal), '<' (less than), '<=' (less than or equal), '>' (greater than), '>=' (greater than or equal), 'in' (value in list), 'not-in' (value not in list), 'array-contains' (array contains value), 'array-contains-any' (array contains any of the values). value: The value to compare against.

Returns: A new FireQuery instance with the added filter.

Example: # Single condition query = users.where('birth_year', '>', 1800)

# Multiple conditions (chained)
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England'))
Source code in src/fire_prox/fire_query.py
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
def where(self, field: str, op: str, value: Any) -> 'FireQuery':
    """
    Attach a filter condition, returning a new query.

    The current query is never mutated; a fresh FireQuery carrying the
    extra condition is returned, so filters can be chained fluently.

    Args:
        field: The field path to filter on (e.g., 'name', 'address.city').
        op: Comparison operator. Supported operators:
            '==' (equal), '!=' (not equal),
            '<' (less than), '<=' (less than or equal),
            '>' (greater than), '>=' (greater than or equal),
            'in' (value in list), 'not-in' (value not in list),
            'array-contains' (array contains value),
            'array-contains-any' (array contains any of the values).
        value: The value to compare against.

    Returns:
        A new FireQuery instance with the added filter.

    Example:
        # Single condition
        query = users.where('birth_year', '>', 1800)

        # Multiple conditions (chained)
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England'))
    """
    # Express the condition as a FieldFilter and hand it to the native
    # query, wrapping the result in a new FireQuery.
    condition = FieldFilter(field, op, value)
    filtered = self._query.where(filter=condition)
    return FireQuery(filtered, self._parent_collection, self._projection)

State

Bases: Enum

Represents the synchronization state of a FireObject with Firestore.

The state machine ensures that FireObject instances correctly manage their lifecycle from creation through deletion, tracking whether data has been loaded from Firestore and whether local modifications need to be saved.

States: DETACHED: Object exists only in Python memory with no Firestore reference. This is the initial state for newly created documents that haven't been saved yet. All data is considered "dirty" as it's new.

ATTACHED: Object is linked to a Firestore document path and has a valid
         DocumentReference, but the document's data has not yet been fetched.
         This enables lazy loading - the reference exists but no network
         request has been made yet.

LOADED:  Object is fully synchronized with Firestore. It has a reference
         and its data has been fetched from the server into the local cache.
         This is the primary operational state for reading and modifying data.

DELETED: Object represents a document that has been deleted from Firestore.
         It retains its ID and path for reference but is marked as defunct
         to prevent further modifications or save operations.

State Transitions: DETACHED -> LOADED: Via save() with optional doc_id ATTACHED -> LOADED: Via fetch() or implicit fetch on attribute access LOADED -> LOADED: Via save() (if dirty) or fetch() (refresh) LOADED -> DELETED: Via delete()

Source code in src/fire_prox/state.py
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
class State(Enum):
    """
    Lifecycle state of a FireObject relative to Firestore.

    A FireObject moves through a small state machine that tracks whether
    it is linked to a Firestore document, whether that document's data
    has been fetched, and whether local edits still need saving.

    States:
        DETACHED: In-memory only; no Firestore reference yet. The initial
                 state for brand-new documents, where all data counts as
                 "dirty" because nothing has been persisted.

        ATTACHED: Bound to a document path with a valid DocumentReference,
                 but the data has not been fetched. Supports lazy loading:
                 no network request has happened yet.

        LOADED:  Fully synchronized: the object holds both a reference and
                 server data in its local cache. The normal working state
                 for reads and modifications.

        DELETED: The backing document was deleted. The ID and path remain
                 available for reference, but the object is defunct and
                 rejects further modification or save operations.

    State Transitions:
        DETACHED -> LOADED:  Via save() with optional doc_id
        ATTACHED -> LOADED:  Via fetch() or implicit fetch on attribute access
        LOADED -> LOADED:    Via save() (if dirty) or fetch() (refresh)
        LOADED -> DELETED:   Via delete()
    """

    # Declaration order matters: auto() assigns 1..4 in sequence.
    DETACHED = auto()  # in memory only, no reference
    ATTACHED = auto()  # reference held, data not yet fetched (lazy)
    LOADED = auto()    # reference held and data cached locally
    DELETED = auto()   # backing document removed from Firestore

    def __str__(self) -> str:
        """Return just the state's name, e.g. 'LOADED'."""
        return self.name

    def __repr__(self) -> str:
        """Return the qualified form, e.g. 'State.LOADED'."""
        return "State." + self.name

__repr__()

Return a detailed representation of the state.

Source code in src/fire_prox/state.py
54
55
56
def __repr__(self) -> str:
    """Return the qualified representation, e.g. 'State.LOADED'."""
    return "State." + self.name

__str__()

Return a human-readable string representation of the state.

Source code in src/fire_prox/state.py
50
51
52
def __str__(self) -> str:
    """Return just the state's name, e.g. 'LOADED', for display."""
    state_name = self.name
    return state_name

Sum

Bases: AggregationType

Sum aggregation - sums a numeric field across documents.

Requires a field name. The field must contain numeric values (int or float).

Example: # Sum all salaries total = employees.sum('salary')

# Sum via aggregate()
result = employees.aggregate(total_revenue=Sum('revenue'))
# Returns: {'total_revenue': 1500000}

Args: field: Name of the numeric field to sum.

Raises: ValueError: If field is not provided.

Source code in src/fire_prox/aggregation.py
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
class Sum(AggregationType):
    """
    Aggregation that totals a numeric field across matching documents.

    A field name is mandatory, and the field's values must be numeric
    (int or float).

    Example:
        # Sum all salaries
        total = employees.sum('salary')

        # Sum via aggregate()
        result = employees.aggregate(total_revenue=Sum('revenue'))
        # Returns: {'total_revenue': 1500000}

    Args:
        field: Name of the numeric field to sum.

    Raises:
        ValueError: If field is not provided.
    """

    def __init__(self, field: str):
        """
        Create a Sum aggregation over the given field.

        Args:
            field: Name of the numeric field to sum.

        Raises:
            ValueError: If field is None or empty.
        """
        # A Sum without a target field is meaningless; reject it early.
        if not field:
            raise ValueError("Sum aggregation requires a field name")
        super().__init__(field=field)

__init__(field)

Initialize Sum aggregation.

Args: field: Name of the numeric field to sum.

Raises: ValueError: If field is None or empty.

Source code in src/fire_prox/aggregation.py
85
86
87
88
89
90
91
92
93
94
95
96
97
def __init__(self, field: str):
    """
    Create a Sum aggregation over the given field.

    Args:
        field: Name of the numeric field to sum.

    Raises:
        ValueError: If field is None or empty.
    """
    # A Sum without a target field is meaningless; reject it early.
    if not field:
        raise ValueError("Sum aggregation requires a field name")
    super().__init__(field=field)

aggregation

Aggregation helper classes for Firestore aggregation queries.

Provides Count, Sum, and Avg aggregation types that can be used with FireQuery.aggregate() method for efficient analytics queries without fetching all documents.

Example: from fire_prox.aggregation import Count, Sum, Avg

# Single aggregation
count = users.where('age', '>', 25).count()

# Multiple aggregations
stats = employees.aggregate(
    total=Count(),
    sum_salary=Sum('salary'),
    avg_age=Avg('age')
)

AggregationType

Base class for aggregation types.

Source code in src/fire_prox/aggregation.py
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
class AggregationType:
    """Common base for aggregation descriptors (Count, Sum, Avg)."""

    def __init__(self, field: Optional[str] = None):
        """
        Record the field this aggregation operates on.

        Args:
            field: Field name to aggregate (None for Count).
        """
        self.field = field

    def __repr__(self) -> str:
        """Return a constructor-style representation."""
        cls_name = type(self).__name__
        if not self.field:
            return f"{cls_name}()"
        return f"{cls_name}('{self.field}')"

__init__(field=None)

Initialize aggregation type.

Args: field: Field name to aggregate (None for Count).

Source code in src/fire_prox/aggregation.py
28
29
30
31
32
33
34
35
def __init__(self, field: Optional[str] = None):
    """
    Record the field this aggregation operates on.

    Args:
        field: Field name to aggregate (None for Count).
    """
    self.field = field

__repr__()

Return string representation.

Source code in src/fire_prox/aggregation.py
37
38
39
40
41
def __repr__(self) -> str:
    """Return a constructor-style representation of this aggregation."""
    cls_name = type(self).__name__
    if not self.field:
        return f"{cls_name}()"
    return f"{cls_name}('{self.field}')"

Avg

Bases: AggregationType

Average aggregation - averages a numeric field across documents.

Requires a field name. The field must contain numeric values (int or float). Returns the arithmetic mean of all non-null values.

Example: # Average age avg_age = users.avg('age')

# Average via aggregate()
result = users.aggregate(avg_rating=Avg('rating'))
# Returns: {'avg_rating': 4.2}

Args: field: Name of the numeric field to average.

Raises: ValueError: If field is not provided.

Source code in src/fire_prox/aggregation.py
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
class Avg(AggregationType):
    """
    Aggregation computing the arithmetic mean of a numeric field.

    A field name is mandatory, and the field's values must be numeric
    (int or float). Only non-null values contribute to the mean.

    Example:
        # Average age
        avg_age = users.avg('age')

        # Average via aggregate()
        result = users.aggregate(avg_rating=Avg('rating'))
        # Returns: {'avg_rating': 4.2}

    Args:
        field: Name of the numeric field to average.

    Raises:
        ValueError: If field is not provided.
    """

    def __init__(self, field: str):
        """
        Create an Avg aggregation over the given field.

        Args:
            field: Name of the numeric field to average.

        Raises:
            ValueError: If field is None or empty.
        """
        # An average needs a target field; reject missing/empty input.
        if not field:
            raise ValueError("Avg aggregation requires a field name")
        super().__init__(field=field)

__init__(field)

Initialize Avg aggregation.

Args: field: Name of the numeric field to average.

Raises: ValueError: If field is None or empty.

Source code in src/fire_prox/aggregation.py
122
123
124
125
126
127
128
129
130
131
132
133
134
def __init__(self, field: str):
    """
    Create an Avg aggregation over the given field.

    Args:
        field: Name of the numeric field to average.

    Raises:
        ValueError: If field is None or empty.
    """
    # An average needs a target field; reject missing/empty input.
    if not field:
        raise ValueError("Avg aggregation requires a field name")
    super().__init__(field=field)

Count

Bases: AggregationType

Count aggregation - counts matching documents.

Does not require a field name since it counts documents, not field values.

Example: # Count all active users count = users.where('active', '==', True).count()

# Count via aggregate()
result = users.aggregate(total_users=Count())
# Returns: {'total_users': 42}
Source code in src/fire_prox/aggregation.py
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
class Count(AggregationType):
    """
    Aggregation that counts matching documents.

    Unlike Sum/Avg, no field is involved: documents themselves are
    tallied, not field values.

    Example:
        # Count all active users
        count = users.where('active', '==', True).count()

        # Count via aggregate()
        result = users.aggregate(total_users=Count())
        # Returns: {'total_users': 42}
    """

    def __init__(self):
        """Set up a field-less Count aggregation."""
        super().__init__(field=None)

__init__()

Initialize Count aggregation (no field needed).

Source code in src/fire_prox/aggregation.py
59
60
61
def __init__(self):
    """Set up a field-less Count aggregation."""
    super().__init__(field=None)

Sum

Bases: AggregationType

Sum aggregation - sums a numeric field across documents.

Requires a field name. The field must contain numeric values (int or float).

Example: # Sum all salaries total = employees.sum('salary')

# Sum via aggregate()
result = employees.aggregate(total_revenue=Sum('revenue'))
# Returns: {'total_revenue': 1500000}

Args: field: Name of the numeric field to sum.

Raises: ValueError: If field is not provided.

Source code in src/fire_prox/aggregation.py
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
class Sum(AggregationType):
    """
    Aggregation that totals a numeric field across matching documents.

    A field name is mandatory, and the field's values must be numeric
    (int or float).

    Example:
        # Sum all salaries
        total = employees.sum('salary')

        # Sum via aggregate()
        result = employees.aggregate(total_revenue=Sum('revenue'))
        # Returns: {'total_revenue': 1500000}

    Args:
        field: Name of the numeric field to sum.

    Raises:
        ValueError: If field is not provided.
    """

    def __init__(self, field: str):
        """
        Create a Sum aggregation over the given field.

        Args:
            field: Name of the numeric field to sum.

        Raises:
            ValueError: If field is None or empty.
        """
        # A Sum without a target field is meaningless; reject it early.
        if not field:
            raise ValueError("Sum aggregation requires a field name")
        super().__init__(field=field)

__init__(field)

Initialize Sum aggregation.

Args: field: Name of the numeric field to sum.

Raises: ValueError: If field is None or empty.

Source code in src/fire_prox/aggregation.py
85
86
87
88
89
90
91
92
93
94
95
96
97
def __init__(self, field: str):
    """
    Set up the Sum aggregation for the given field.

    Args:
        field: Name of the numeric field to sum.

    Raises:
        ValueError: If field is None or empty.
    """
    # An empty or None field name cannot be summed — fail fast.
    if field:
        super().__init__(field=field)
        return
    raise ValueError("Sum aggregation requires a field name")

async_fire_collection

AsyncFireCollection: Async version of FireCollection.

This module implements the asynchronous FireCollection class for use with google.cloud.firestore.AsyncClient.

AsyncFireCollection

Bases: BaseFireCollection

A wrapper around Firestore AsyncCollectionReference for document management.

AsyncFireCollection provides a simplified interface for creating new documents and querying collections asynchronously.

Usage Examples: # Get a collection users = db.collection('users')

# Create a new document in DETACHED state
new_user = users.new()
new_user.name = 'Ada Lovelace'
new_user.year = 1815
await new_user.save()

# Create with explicit ID
user = users.new()
user.name = 'Charles Babbage'
await user.save(doc_id='cbabbage')

# Phase 2: Query the collection
query = users.where('year', '>', 1800).limit(10)
async for user in query.get():
    print(user.name)
Source code in src/fire_prox/async_fire_collection.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
class AsyncFireCollection(BaseFireCollection):
    """
    A wrapper around Firestore AsyncCollectionReference for document management.

    AsyncFireCollection provides a simplified interface for creating new documents
    and querying collections asynchronously.

    Usage Examples:
        # Get a collection
        users = db.collection('users')

        # Create a new document in DETACHED state
        new_user = users.new()
        new_user.name = 'Ada Lovelace'
        new_user.year = 1815
        await new_user.save()

        # Create with explicit ID
        user = users.new()
        user.name = 'Charles Babbage'
        await user.save(doc_id='cbabbage')

        # Phase 2: Query the collection
        query = users.where('year', '>', 1800).limit(10)
        async for user in query.get():
            print(user.name)
    """

    # =========================================================================
    # Document Creation
    # =========================================================================

    def _instantiate_object(
        self,
        *,
        doc_ref: Any,
        initial_state: State,
        parent_collection: 'AsyncFireCollection',
        sync_doc_ref: Optional[Any] = None,
        sync_client: Optional[Any] = None,
        **_: Any,
    ) -> AsyncFireObject:
        """Instantiate the asynchronous FireObject wrapper."""
        # NOTE(review): sync_doc_ref/sync_client carry an optional synchronous
        # client alongside the async doc_ref — presumably so AsyncFireObject
        # can perform sync fallbacks internally; confirm against its code.
        return AsyncFireObject(
            doc_ref=doc_ref,
            sync_doc_ref=sync_doc_ref,
            sync_client=sync_client,
            initial_state=initial_state,
            parent_collection=parent_collection,
        )

    def _get_new_kwargs(self) -> dict[str, Any]:
        """Extra kwargs forwarded by new() into _instantiate_object()."""
        return {'sync_client': self._sync_client}

    def _get_doc_kwargs(self, doc_id: str) -> dict[str, Any]:
        """Build doc() kwargs, deriving a sync doc reference when a sync client exists."""
        sync_doc_ref = None
        if self._sync_client is not None:
            sync_doc_ref = self._sync_client.collection(self.path).document(doc_id)
        return {'sync_doc_ref': sync_doc_ref, 'sync_client': self._sync_client}

    def new(self) -> AsyncFireObject:
        """Create a new AsyncFireObject in DETACHED state."""
        # Delegates to BaseFireCollection.new(); override narrows the return type.
        return super().new()

    def doc(self, doc_id: str) -> AsyncFireObject:
        """Get a reference to a specific document in this collection."""
        # Delegates to BaseFireCollection.doc(); override narrows the return type.
        return super().doc(doc_id)

    # =========================================================================
    # Properties (inherited from BaseFireCollection)
    # =========================================================================

    @property
    def parent(self) -> Optional[AsyncFireObject]:
        """
        Get the parent document if this is a subcollection.

        Phase 2 feature.

        Returns:
            AsyncFireObject representing the parent document if this is a
            subcollection, None if this is a root-level collection.
        """
        raise NotImplementedError("Phase 2 feature - subcollections")

    # =========================================================================
    # Query Methods (Phase 2)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
        """
        Create a query with a filter condition.

        Phase 2.5 feature. Builds a lightweight query for common filtering needs.

        Args:
            field: The field path to filter on.
            op: Comparison operator.
            value: The value to compare against.

        Returns:
            An AsyncFireQuery instance for method chaining.

        Example:
            query = users.where('birth_year', '>', 1800)
                        .where('country', '==', 'UK')
                        .limit(10)
            async for user in query.stream():
                print(user.name)
        """
        # Imported lazily to avoid a module-level import cycle with the query module.
        from google.cloud.firestore_v1.base_query import FieldFilter

        from .async_fire_query import AsyncFireQuery

        # Create initial query with filter
        filter_obj = FieldFilter(field, op, value)
        native_query = self._collection_ref.where(filter=filter_obj)
        return AsyncFireQuery(native_query, parent_collection=self)

    def order_by(
        self,
        field: str,
        direction: str = 'ASCENDING'
    ) -> 'AsyncFireQuery':
        """
        Create a query with ordering.

        Phase 2.5 feature.

        Args:
            field: The field path to order by.
            direction: 'ASCENDING' or 'DESCENDING'.

        Returns:
            An AsyncFireQuery instance for method chaining.

        Raises:
            ValueError: If direction is not a recognized direction string.
        """
        from google.cloud.firestore_v1 import Query as QueryClass

        from .async_fire_query import AsyncFireQuery

        # Convert direction string to constant
        if direction.upper() == 'ASCENDING':
            direction_const = QueryClass.ASCENDING
        elif direction.upper() == 'DESCENDING':
            direction_const = QueryClass.DESCENDING
        else:
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        # Create query with ordering
        native_query = self._collection_ref.order_by(field, direction=direction_const)
        return AsyncFireQuery(native_query, parent_collection=self)

    def limit(self, count: int) -> 'AsyncFireQuery':
        """
        Create a query with a result limit.

        Phase 2.5 feature.

        Args:
            count: Maximum number of results to return.

        Returns:
            An AsyncFireQuery instance for method chaining.

        Raises:
            ValueError: If count is zero or negative.
        """
        from .async_fire_query import AsyncFireQuery

        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")

        # Create query with limit
        native_query = self._collection_ref.limit(count)
        return AsyncFireQuery(native_query, parent_collection=self)

    def select(self, *field_paths: str) -> 'AsyncFireQuery':
        """
        Create a query with field projection.

        Phase 4 Part 3 feature. Selects specific fields to return in query results.
        Returns vanilla dictionaries instead of AsyncFireObject instances.

        Args:
            *field_paths: One or more field paths to select.

        Returns:
            An AsyncFireQuery instance with projection applied.

        Raises:
            ValueError: If no field paths are given.

        Example:
            # Select specific fields
            results = await users.select('name', 'email').get()
            # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
        """
        from .async_fire_query import AsyncFireQuery

        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # Create query with projection; the projection tuple tells the
        # AsyncFireQuery to emit plain dicts rather than AsyncFireObjects.
        native_query = self._collection_ref.select(list(field_paths))
        return AsyncFireQuery(native_query, parent_collection=self, projection=field_paths)

    async def get_all(self) -> AsyncIterator[AsyncFireObject]:
        """
        Retrieve all documents in the collection.

        Phase 2.5 feature. Returns an async iterator of all documents.

        Yields:
            AsyncFireObject instances in LOADED state for each document.

        Example:
            async for user in users.get_all():
                print(f"{user.name}: {user.year}")
        """
        # Stream all documents from the collection
        async for snapshot in self._collection_ref.stream():
            yield AsyncFireObject.from_snapshot(snapshot, parent_collection=self)

    # =========================================================================
    # Vector Query Methods
    # =========================================================================

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'AsyncFireQuery':
        """
        Find the nearest neighbors based on vector similarity.

        Performs a vector similarity search to find documents with embeddings
        nearest to the query vector. Requires a single-field vector index on
        the vector_field.

        Args:
            vector_field: Name of the field containing vector embeddings.
            query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
            distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
                DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
            limit: Maximum number of nearest neighbors to return (max 1000).
            distance_result_field: Optional field name to store the calculated distance
                in the query results.

        Returns:
            An AsyncFireQuery instance for method chaining and execution.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            collection = db.collection("documents")
            query = collection.find_nearest(
                vector_field="embedding",
                query_vector=Vector([0.1, 0.2, 0.3]),
                distance_measure=DistanceMeasure.EUCLIDEAN,
                limit=5
            )
            async for doc in query.stream():
                print(f"{doc.title}: {doc.embedding}")

        Note:
            - Requires a vector index on the vector_field
            - Maximum limit is 1000 documents
            - Can be combined with where() for pre-filtering (requires composite index)
            - Does not work with Firestore emulator (production only)
        """
        from .async_fire_query import AsyncFireQuery

        # Create vector query using native find_nearest
        native_query = self._collection_ref.find_nearest(
            vector_field=vector_field,
            query_vector=query_vector,
            distance_measure=distance_measure,
            limit=limit,
            distance_result_field=distance_result_field,
        )
        return AsyncFireQuery(native_query, parent_collection=self)

    # =========================================================================
    # Aggregation Methods (Phase 4 Part 5)
    # =========================================================================

    async def count(self) -> int:
        """
        Count documents in the collection.

        Phase 4 Part 5 feature. Returns the total count of documents
        without fetching their data.

        Returns:
            The number of documents in the collection.

        Example:
            total = await users.count()
            print(f"Total users: {total}")
        """
        from .async_fire_query import AsyncFireQuery
        # Use collection reference directly as a query for aggregation
        query = AsyncFireQuery(self._collection_ref, parent_collection=self)
        return await query.count()

    async def sum(self, field: str):
        """
        Sum a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the sum of a numeric field
        without fetching document data.

        Args:
            field: The field name to sum.

        Returns:
            The sum of the field values (int or float).

        Example:
            total_revenue = await orders.sum('amount')
        """
        from .async_fire_query import AsyncFireQuery
        # Use collection reference directly as a query for aggregation
        query = AsyncFireQuery(self._collection_ref, parent_collection=self)
        return await query.sum(field)

    async def avg(self, field: str) -> float:
        """
        Average a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the average of a numeric field
        without fetching document data.

        Args:
            field: The field name to average.

        Returns:
            The average of the field values (float).

        Example:
            avg_rating = await products.avg('rating')
        """
        from .async_fire_query import AsyncFireQuery
        # Use collection reference directly as a query for aggregation
        query = AsyncFireQuery(self._collection_ref, parent_collection=self)
        return await query.avg(field)

    async def aggregate(self, **aggregations):
        """
        Execute multiple aggregations in a single query.

        Phase 4 Part 5 feature. Performs multiple aggregation operations
        (count, sum, avg) in one efficient query.

        Args:
            **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

        Returns:
            Dictionary mapping aggregation names to their results.

        Example:
            from fire_prox import Count, Sum, Avg

            stats = await users.aggregate(
                total=Count(),
                total_score=Sum('score'),
                avg_age=Avg('age')
            )
            # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
        """
        from .async_fire_query import AsyncFireQuery
        # Use collection reference directly as a query for aggregation
        query = AsyncFireQuery(self._collection_ref, parent_collection=self)
        return await query.aggregate(**aggregations)

    # =========================================================================
    # Collection Deletion
    # =========================================================================

    async def delete_all(
        self,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete every document in this collection asynchronously.

        Firestore does not expose a server-side "drop collection" operation.
        This helper batches document deletes and, when recursive is True
        (default), also clears any nested subcollections before removing
        the parent document.

        Args:
            batch_size: Maximum number of deletes per commit.
            recursive: Whether to delete nested subcollections.
            dry_run: Count affected documents without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections
            visited during recursion.

        Raises:
            ValueError: If batch_size is not positive.
        """
        self._validate_batch_size(batch_size)

        # include_self=False: this collection itself is not counted as a
        # visited subcollection, only nested ones discovered during recursion.
        return await self._delete_collection_recursive(
            collection_ref=self._collection_ref,
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
            include_self=False,
        )

    async def _delete_collection_recursive(
        self,
        *,
        collection_ref: Any,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
        include_self: bool,
    ) -> Dict[str, int]:
        """Internal helper to delete documents within an async collection reference."""
        client = collection_ref._client
        stats = {'documents': 0, 'collections': 1 if include_self else 0}
        # In dry-run mode no write batch is created; work is only counted.
        batch = None if dry_run else client.batch()
        ops_in_batch = 0

        async for doc_ref in collection_ref.list_documents(page_size=batch_size):
            if recursive:
                # Clear nested subcollections before deleting the parent doc.
                sub_stats = await self._delete_document_subcollections(
                    doc_ref,
                    batch_size=batch_size,
                    recursive=recursive,
                    dry_run=dry_run,
                )
                stats['documents'] += sub_stats['documents']
                stats['collections'] += sub_stats['collections']

            if not dry_run and batch is not None:
                batch.delete(doc_ref)
                ops_in_batch += 1

            stats['documents'] += 1

            # Commit once the batch is full, then start a fresh one.
            if not dry_run and batch is not None and ops_in_batch >= batch_size:
                await batch.commit()
                batch = client.batch()
                ops_in_batch = 0

        # Flush any remaining queued deletes.
        if not dry_run and batch is not None and ops_in_batch:
            await batch.commit()

        return stats

    async def _delete_document_subcollections(
        self,
        doc_ref: Any,
        *,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
    ) -> Dict[str, int]:
        """Delete all subcollections hanging off an async document reference."""
        stats = {'documents': 0, 'collections': 0}

        async for subcollection_ref in doc_ref.collections():
            # Each subcollection counts itself (include_self=True) plus
            # whatever deeper recursion discovers.
            sub_stats = await self._delete_collection_recursive(
                collection_ref=subcollection_ref,
                batch_size=batch_size,
                recursive=recursive,
                dry_run=dry_run,
                include_self=True,
            )
            stats['documents'] += sub_stats['documents']
            stats['collections'] += sub_stats['collections']

        return stats

parent property

Get the parent document if this is a subcollection.

Phase 2 feature.

Returns: AsyncFireObject representing the parent document if this is a subcollection, None if this is a root-level collection.

aggregate(**aggregations) async

Execute multiple aggregations in a single query.

Phase 4 Part 5 feature. Performs multiple aggregation operations (count, sum, avg) in one efficient query.

Args: **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

Returns: Dictionary mapping aggregation names to their results.

Example: from fire_prox import Count, Sum, Avg

stats = await users.aggregate(
    total=Count(),
    total_score=Sum('score'),
    avg_age=Avg('age')
)
# Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
Source code in src/fire_prox/async_fire_collection.py
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
async def aggregate(self, **aggregations):
    """
    Run several aggregation operations in one query.

    Phase 4 Part 5 feature. Combines count/sum/avg aggregations into a
    single efficient round trip.

    Args:
        **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

    Returns:
        Dictionary mapping aggregation names to their results.

    Example:
        from fire_prox import Count, Sum, Avg

        stats = await users.aggregate(
            total=Count(),
            total_score=Sum('score'),
            avg_age=Avg('age')
        )
        # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
    """
    from .async_fire_query import AsyncFireQuery

    # The collection reference doubles as the base query for aggregation.
    base_query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    return await base_query.aggregate(**aggregations)

avg(field) async

Average a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the average of a numeric field without fetching document data.

Args: field: The field name to average.

Returns: The average of the field values (float).

Example: avg_rating = await products.avg('rating')

Source code in src/fire_prox/async_fire_collection.py
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
async def avg(self, field: str) -> float:
    """
    Compute the mean of a numeric field over every document.

    Phase 4 Part 5 feature. The average is computed server-side, so no
    document payloads are fetched.

    Args:
        field: The field name to average.

    Returns:
        The average of the field values (float).

    Example:
        avg_rating = await products.avg('rating')
    """
    from .async_fire_query import AsyncFireQuery

    # The collection reference doubles as the base query for aggregation.
    base_query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    return await base_query.avg(field)

count() async

Count documents in the collection.

Phase 4 Part 5 feature. Returns the total count of documents without fetching their data.

Returns: The number of documents in the collection.

Example:
    total = await users.count()
    print(f"Total users: {total}")

Source code in src/fire_prox/async_fire_collection.py
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
async def count(self) -> int:
    """
    Return how many documents the collection contains.

    Phase 4 Part 5 feature. The count is computed server-side, so no
    document payloads are fetched.

    Returns:
        The number of documents in the collection.

    Example:
        total = await users.count()
        print(f"Total users: {total}")
    """
    from .async_fire_query import AsyncFireQuery

    # The collection reference doubles as the base query for aggregation.
    base_query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    return await base_query.count()

delete_all(*, batch_size=50, recursive=True, dry_run=False) async

Delete every document in this collection asynchronously.

Firestore does not expose a server-side "drop collection" operation. This helper batches document deletes and, when recursive is True (default), also clears any nested subcollections before removing the parent document.

Args: batch_size: Maximum number of deletes per commit. recursive: Whether to delete nested subcollections. dry_run: Count affected documents without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections visited during recursion.

Raises: ValueError: If batch_size is not positive.

Source code in src/fire_prox/async_fire_collection.py
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
async def delete_all(
    self,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Remove every document from this collection, batching the deletes.

    Firestore has no server-side "drop collection" primitive, so documents
    are deleted in commits of at most batch_size operations. With recursive
    enabled (the default), nested subcollections are emptied before their
    parent documents are removed.

    Args:
        batch_size: Upper bound on delete operations per batch commit.
        recursive: Also purge subcollections beneath each document.
        dry_run: Only tally affected documents; perform no writes.

    Returns:
        Dictionary with counts for deleted documents and subcollections
        visited during recursion.

    Raises:
        ValueError: If batch_size is not positive.
    """
    self._validate_batch_size(batch_size)
    # Delegate the actual traversal; this collection itself is not counted
    # as a visited subcollection (include_self=False).
    return await self._delete_collection_recursive(
        collection_ref=self._collection_ref,
        batch_size=batch_size,
        recursive=recursive,
        dry_run=dry_run,
        include_self=False,
    )

doc(doc_id)

Get a reference to a specific document in this collection.

Source code in src/fire_prox/async_fire_collection.py
82
83
84
def doc(self, doc_id: str) -> AsyncFireObject:
    """Get a reference to a specific document in this collection."""
    # Delegates to BaseFireCollection.doc(); the override exists to narrow
    # the declared return type to AsyncFireObject for the async API.
    return super().doc(doc_id)

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search to find documents with embeddings nearest to the query vector. Requires a single-field vector index on the vector_field.

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: An AsyncFireQuery instance for method chaining and execution.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

collection = db.collection("documents")
query = collection.find_nearest(
    vector_field="embedding",
    query_vector=Vector([0.1, 0.2, 0.3]),
    distance_measure=DistanceMeasure.EUCLIDEAN,
    limit=5
)
async for doc in query.stream():
    print(f"{doc.title}: {doc.embedding}")

Note: - Requires a vector index on the vector_field - Maximum limit is 1000 documents - Can be combined with where() for pre-filtering (requires composite index) - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/async_fire_collection.py
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'AsyncFireQuery':
    """
    Run a vector-similarity search for the nearest neighbors.

    Locates documents whose embeddings are closest to query_vector.
    A single-field vector index on vector_field is required.

    Args:
        vector_field: Name of the field containing vector embeddings.
        query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
        distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
            DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
        limit: Maximum number of nearest neighbors to return (max 1000).
        distance_result_field: Optional field name to store the calculated distance
            in the query results.

    Returns:
        An AsyncFireQuery instance for method chaining and execution.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        collection = db.collection("documents")
        query = collection.find_nearest(
            vector_field="embedding",
            query_vector=Vector([0.1, 0.2, 0.3]),
            distance_measure=DistanceMeasure.EUCLIDEAN,
            limit=5
        )
        async for doc in query.stream():
            print(f"{doc.title}: {doc.embedding}")

    Note:
        - Requires a vector index on the vector_field
        - Maximum limit is 1000 documents
        - Can be combined with where() for pre-filtering (requires composite index)
        - Does not work with Firestore emulator (production only)
    """
    from .async_fire_query import AsyncFireQuery

    # Build the vector query via the native client, then wrap it.
    vector_query = self._collection_ref.find_nearest(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return AsyncFireQuery(vector_query, parent_collection=self)

get_all() async

Retrieve all documents in the collection.

Phase 2.5 feature. Returns an async iterator of all documents.

Yields: AsyncFireObject instances in LOADED state for each document.

Example:
    async for user in users.get_all():
        print(f"{user.name}: {user.year}")

Source code in src/fire_prox/async_fire_collection.py
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
async def get_all(self) -> AsyncIterator[AsyncFireObject]:
    """
    Iterate over every document in the collection.

    Phase 2.5 feature. Produces an async iterator covering all documents.

    Yields:
        AsyncFireObject instances in LOADED state for each document.

    Example:
        async for user in users.get_all():
            print(f"{user.name}: {user.year}")
    """
    # Stream snapshots from the native collection and wrap each one.
    async for doc_snapshot in self._collection_ref.stream():
        yield AsyncFireObject.from_snapshot(doc_snapshot, parent_collection=self)

limit(count)

Create a query with a result limit.

Phase 2.5 feature.

Args: count: Maximum number of results to return.

Returns: An AsyncFireQuery instance for method chaining.

Source code in src/fire_prox/async_fire_collection.py
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
def limit(self, count: int) -> 'AsyncFireQuery':
    """
    Build a query capped at a maximum number of results.

    Phase 2.5 feature.

    Args:
        count: Maximum number of results to return.

    Returns:
        An AsyncFireQuery instance for method chaining.

    Raises:
        ValueError: If count is zero or negative.
    """
    from .async_fire_query import AsyncFireQuery

    # Guard against non-positive limits before touching the native API.
    if count <= 0:
        raise ValueError(f"Limit count must be positive, got {count}")

    limited_query = self._collection_ref.limit(count)
    return AsyncFireQuery(limited_query, parent_collection=self)

new()

Create a new AsyncFireObject in DETACHED state.

Source code in src/fire_prox/async_fire_collection.py
78
79
80
def new(self) -> AsyncFireObject:
    """Create a new AsyncFireObject in DETACHED state.

    The returned object has no document reference yet; one is assigned
    when ``save()`` is first awaited. Delegates to the shared base-class
    implementation.
    """
    return super().new()

order_by(field, direction='ASCENDING')

Create a query with ordering.

Phase 2.5 feature.

Args: field: The field path to order by. direction: 'ASCENDING' or 'DESCENDING'.

Returns: An AsyncFireQuery instance for method chaining.

Source code in src/fire_prox/async_fire_collection.py
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
def order_by(
    self,
    field: str,
    direction: str = 'ASCENDING'
) -> 'AsyncFireQuery':
    """
    Create a query with ordering.

    Phase 2.5 feature.

    Args:
        field: The field path to order by.
        direction: 'ASCENDING' or 'DESCENDING' (case-insensitive).

    Returns:
        An AsyncFireQuery instance for method chaining.

    Raises:
        ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.
    """
    # Fail fast: normalize once and validate before any imports, so an
    # invalid direction always raises the documented ValueError rather
    # than surfacing as an import-time failure.
    normalized = direction.upper()
    if normalized not in ('ASCENDING', 'DESCENDING'):
        raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

    from google.cloud.firestore_v1 import Query as QueryClass

    from .async_fire_query import AsyncFireQuery

    # Convert the validated direction string to the native constant.
    direction_const = (
        QueryClass.ASCENDING if normalized == 'ASCENDING' else QueryClass.DESCENDING
    )

    # Create query with ordering
    native_query = self._collection_ref.order_by(field, direction=direction_const)
    return AsyncFireQuery(native_query, parent_collection=self)

select(*field_paths)

Create a query with field projection.

Phase 4 Part 3 feature. Selects specific fields to return in query results. Returns vanilla dictionaries instead of AsyncFireObject instances.

Args: *field_paths: One or more field paths to select.

Returns: An AsyncFireQuery instance with projection applied.

Example: # Select specific fields results = await users.select('name', 'email').get() # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]

Source code in src/fire_prox/async_fire_collection.py
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
def select(self, *field_paths: str) -> 'AsyncFireQuery':
    """
    Create a query with field projection.

    Phase 4 Part 3 feature. Selects specific fields to return in query results.
    Returns vanilla dictionaries instead of AsyncFireObject instances.

    Args:
        *field_paths: One or more field paths to select.

    Returns:
        An AsyncFireQuery instance with projection applied.

    Raises:
        ValueError: If no field paths are given.

    Example:
        # Select specific fields
        results = await users.select('name', 'email').get()
        # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
    """
    # Fail fast: validate before the lazy import so calling select() with
    # no fields always raises the documented ValueError.
    if not field_paths:
        raise ValueError("select() requires at least one field path")

    from .async_fire_query import AsyncFireQuery

    # Create query with projection
    native_query = self._collection_ref.select(list(field_paths))
    return AsyncFireQuery(native_query, parent_collection=self, projection=field_paths)

sum(field) async

Sum a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the sum of a numeric field without fetching document data.

Args: field: The field name to sum.

Returns: The sum of the field values (int or float).

Example: total_revenue = await orders.sum('amount')

Source code in src/fire_prox/async_fire_collection.py
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
async def sum(self, field: str):
    """
    Sum a numeric field across all documents.

    Phase 4 Part 5 feature. Runs a server-side aggregation, so document
    payloads are never downloaded.

    Args:
        field: The field name to sum.

    Returns:
        The sum of the field values (int or float).

    Example:
        total_revenue = await orders.sum('amount')
    """
    from .async_fire_query import AsyncFireQuery

    # A collection reference is itself a valid query target, so wrap it
    # directly and delegate the aggregation to the query wrapper.
    aggregation_query = AsyncFireQuery(self._collection_ref, parent_collection=self)
    return await aggregation_query.sum(field)

where(field, op, value)

Create a query with a filter condition.

Phase 2.5 feature. Builds a lightweight query for common filtering needs.

Args: field: The field path to filter on. op: Comparison operator. value: The value to compare against.

Returns: An AsyncFireQuery instance for method chaining.

Example: query = users.where('birth_year', '>', 1800) .where('country', '==', 'UK') .limit(10) async for user in query.stream(): print(user.name)

Source code in src/fire_prox/async_fire_collection.py
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
    """
    Create a query with a filter condition.

    Phase 2.5 feature. Builds a lightweight query for common filtering needs.

    Args:
        field: The field path to filter on.
        op: Comparison operator.
        value: The value to compare against.

    Returns:
        An AsyncFireQuery instance for method chaining.

    Example:
        query = users.where('birth_year', '>', 1800)
                    .where('country', '==', 'UK')
                    .limit(10)
        async for user in query.stream():
            print(user.name)
    """
    from google.cloud.firestore_v1.base_query import FieldFilter

    from .async_fire_query import AsyncFireQuery

    # Build the filter object first, then seed the native query with it.
    condition = FieldFilter(field, op, value)
    filtered_query = self._collection_ref.where(filter=condition)
    return AsyncFireQuery(filtered_query, parent_collection=self)

async_fire_object

AsyncFireObject: Async version of FireObject for AsyncClient.

This module implements the asynchronous FireObject class for use with google.cloud.firestore.AsyncClient.

AsyncFireObject

Bases: BaseFireObject

Asynchronous schemaless, state-aware proxy for a Firestore document.

AsyncFireObject provides an object-oriented interface to Firestore documents using the async/await pattern for all I/O operations.

Lazy Loading: AsyncFireObject supports lazy loading via automatic fetch on attribute access. When accessing an attribute on an ATTACHED object, it will automatically fetch data from Firestore (using a companion synchronous client to perform the fetch). This happens once per object - subsequent accesses are instant dict lookups.

Usage Examples: # Create a new document (DETACHED state) user = collection.new() user.name = 'Ada Lovelace' user.year = 1815 await user.save() # Transitions to LOADED

# Load existing document with lazy loading (automatic fetch)
user = db.doc('users/alovelace')  # ATTACHED state
print(user.name)  # Automatically fetches data, transitions to LOADED

# Or explicitly fetch if preferred
user = db.doc('users/alovelace')
await user.fetch()  # Explicit async fetch
print(user.name)

# Update and save
user.year = 1816
await user.save()

# Delete
await user.delete()
Source code in src/fire_prox/async_fire_object.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
class AsyncFireObject(BaseFireObject):
    """
    Asynchronous schemaless, state-aware proxy for a Firestore document.

    AsyncFireObject provides an object-oriented interface to Firestore documents
    using the async/await pattern for all I/O operations.

    Lazy Loading: AsyncFireObject supports lazy loading via automatic fetch on
    attribute access. When accessing an attribute on an ATTACHED object, it will
    automatically fetch data from Firestore using a companion synchronous
    client (no event loop is involved in that fetch). This happens once per
    object - subsequent accesses are instant dict lookups.

    Usage Examples:
        # Create a new document (DETACHED state)
        user = collection.new()
        user.name = 'Ada Lovelace'
        user.year = 1815
        await user.save()  # Transitions to LOADED

        # Load existing document with lazy loading (automatic fetch)
        user = db.doc('users/alovelace')  # ATTACHED state
        print(user.name)  # Automatically fetches data, transitions to LOADED

        # Or explicitly fetch if preferred
        user = db.doc('users/alovelace')
        await user.fetch()  # Explicit async fetch
        print(user.name)

        # Update and save
        user.year = 1816
        await user.save()

        # Delete
        await user.delete()
    """

    # =========================================================================
    # Firestore I/O Hooks
    # =========================================================================

    async def _get_snapshot(self, transaction: Optional[Any] = None) -> DocumentSnapshot:
        """Retrieve a document snapshot using the async client.

        When a transaction is supplied, the read is issued inside it so it
        participates in the transaction's consistency guarantees.
        """
        if transaction is not None:
            return await self._doc_ref.get(transaction=transaction)
        return await self._doc_ref.get()

    def _create_document(self, doc_id: Optional[str] = None) -> AsyncDocumentReference:
        """Create a new async document reference for DETACHED saves.

        Args:
            doc_id: Optional explicit document ID. When omitted, the native
                client's document() generates the ID.

        Returns:
            The new AsyncDocumentReference (also stored on self).

        Raises:
            ValueError: If the object has no parent collection to create in.
        """
        if not self._parent_collection:
            raise ValueError("DETACHED object has no parent collection")

        collection_ref = self._parent_collection._collection_ref
        if doc_id:
            doc_ref = collection_ref.document(doc_id)
        else:
            doc_ref = collection_ref.document()

        # Bypass __setattr__ so the reference is stored as internal state
        # rather than being treated as a document field.
        object.__setattr__(self, '_doc_ref', doc_ref)

        if self._sync_client is not None:
            # Mirror the reference on the companion sync client so lazy
            # loading in __getattr__ can fetch without an event loop.
            sync_ref = self._sync_client.document(doc_ref.path)
            object.__setattr__(self, '_sync_doc_ref', sync_ref)

        return doc_ref

    async def _write_set(
        self,
        data: Dict[str, Any],
        doc_ref: Optional[AsyncDocumentReference] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Persist data via a set call on the async client.

        Transaction and batch writes are queued synchronously on those
        objects; only the direct path awaits the network call.
        """
        target_ref = doc_ref or self._doc_ref

        if transaction is not None:
            transaction.set(target_ref, data)
        elif batch is not None:
            batch.set(target_ref, data)
        else:
            await target_ref.set(data)

    async def _write_update(
        self,
        update_dict: Dict[str, Any],
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Perform an update operation using the async client.

        Same dispatch as _write_set: transaction/batch calls are queued,
        the direct path awaits the update.
        """
        if transaction is not None:
            transaction.update(self._doc_ref, update_dict)
        elif batch is not None:
            batch.update(self._doc_ref, update_dict)
        else:
            await self._doc_ref.update(update_dict)

    async def _write_delete(self, batch: Optional[Any] = None) -> None:
        """Delete the document using the async client (or queue on a batch)."""
        if batch is not None:
            batch.delete(self._doc_ref)
        else:
            await self._doc_ref.delete()

    def __getattr__(self, name: str) -> Any:
        """
        Handle attribute access for document fields with lazy loading.

        This method implements lazy loading: if the object is in ATTACHED state,
        accessing any data attribute will automatically trigger a synchronous fetch
        to load the data from Firestore using a companion sync client.

        This fetch happens **once per object** - after the first attribute access,
        the object transitions to LOADED state and subsequent accesses are instant
        dict lookups.

        Args:
            name: The attribute name being accessed.

        Returns:
            The value of the field from the internal _data cache.

        Raises:
            AttributeError: If the attribute doesn't exist in _data after
                           fetching (if necessary).
            NotFound: If document doesn't exist in Firestore (during lazy load).

        State Transitions:
            ATTACHED -> LOADED: Automatically fetches data on first access.

        Example:
            user = db.doc('users/alovelace')  # ATTACHED
            name = user.name  # Triggers sync fetch, transitions to LOADED
            year = user.year  # No fetch needed, already LOADED
        """
        # Internal bookkeeping names are never lazily loaded; reaching here
        # means they were simply never set on this instance.
        if name in self._INTERNAL_ATTRS:
            raise AttributeError(f"Internal attribute {name} not set")

        # If we're in ATTACHED state, trigger lazy loading via sync fetch
        if self._state == State.ATTACHED and self._sync_doc_ref:
            # Use sync doc ref for lazy loading (synchronous fetch)
            snapshot = self._sync_doc_ref.get()

            if not snapshot.exists:
                raise NotFound(f"Document {self._sync_doc_ref.path} does not exist")

            # Get data and convert special types (DocumentReference → FireObject, etc.)
            data = snapshot.to_dict() or {}
            converted_data = {}
            # NOTE(review): the hasattr guard below is redundant — this branch
            # already required self._sync_doc_ref to be truthy.
            sync_client = (
                self._sync_doc_ref._client
                if hasattr(self, '_sync_doc_ref') and self._sync_doc_ref
                else None
            )
            for key, value in data.items():
                converted_data[key] = self._convert_snapshot_value_for_retrieval(
                    value,
                    is_async=True,
                    sync_client=sync_client,
                )

            # Transition to LOADED with converted data
            self._transition_to_loaded(converted_data)

        return self._materialize_field(name)

    # =========================================================================
    # Async Lifecycle Methods
    # =========================================================================

    async def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'AsyncFireObject':
        """
        Fetch document data from Firestore asynchronously.

        Args:
            force: If True, fetch data even if already LOADED.
            transaction: Optional transaction object for transactional reads.

        Returns:
            Self, to allow method chaining.

        Raises:
            ValueError: If called on DETACHED object.
            RuntimeError: If called on DELETED object.
            NotFound: If document doesn't exist.

        State Transitions:
            ATTACHED -> LOADED
            LOADED -> LOADED (if force=True)

        Example:
            # Normal fetch
            user = db.doc('users/alovelace')  # ATTACHED
            await user.fetch()  # Now LOADED

            # Transactional fetch
            transaction = db.transaction()
            @firestore.async_transactional
            async def read_user(transaction):
                await user.fetch(transaction=transaction)
                return user.credits
            credits = await read_user(transaction)
        """
        # No-op when already LOADED and force is not requested.
        if self._should_skip_fetch(force):
            return self

        snapshot = await self._get_snapshot(transaction)
        self._process_snapshot(snapshot, is_async=True)

        return self

    async def save(
        self,
        doc_id: Optional[str] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> 'AsyncFireObject':
        """
        Save the object's data to Firestore asynchronously.

        Args:
            doc_id: Optional custom document ID for DETACHED objects.
            transaction: Optional transaction object for transactional writes.
            batch: Optional batch object for batched writes. If provided,
                  the write will be accumulated in the batch (committed later).

        Returns:
            Self, to allow method chaining.

        Raises:
            RuntimeError: If called on DELETED object.
            ValueError: If DETACHED without parent_collection, or if
                       trying to create a new document within a transaction or batch.

        State Transitions:
            DETACHED -> LOADED (creates new document)
            LOADED -> LOADED (updates if dirty)

        Example:
            # Normal save
            user = collection.new()
            user.name = 'Ada'
            await user.save(doc_id='alovelace')

            # Transactional save
            transaction = db.transaction()
            @firestore.async_transactional
            async def update_user(transaction):
                await user.fetch(transaction=transaction)
                user.credits += 10
                await user.save(transaction=transaction)
            await update_user(transaction)

            # Batch save
            batch = db.batch()
            user1.save(batch=batch)
            user2.save(batch=batch)
            await batch.commit()  # Commit all operations
        """
        self._validate_not_deleted("save()")

        if self._state == State.DETACHED:
            # New document: _prepare_detached_save resolves the target ref
            # and storage payload (transaction/batch are passed for validation).
            doc_ref, storage_data = self._prepare_detached_save(doc_id, transaction, batch)
            await self._write_set(storage_data, doc_ref=doc_ref)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        if self._state == State.LOADED:
            # Skip the round trip entirely when nothing changed locally.
            if not self.is_dirty():
                return self

            update_dict = self._build_update_dict()
            await self._write_update(update_dict, transaction=transaction, batch=batch)
            self._mark_clean()
            return self

        if self._state == State.ATTACHED:
            # ATTACHED objects were never fetched: write the full local data
            # with set() rather than a partial update.
            storage_data = self._prepare_data_for_storage()
            await self._write_set(storage_data, transaction=transaction, batch=batch)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        return self

    async def collections(self, names_only: bool = False) -> List[Any]:
        """
        List subcollections beneath this document asynchronously.

        Args:
            names_only: When True, return collection IDs instead of wrappers.

        Returns:
            List of subcollection names or AsyncFireCollection wrappers.
        """
        self._validate_not_detached("collections()")
        self._validate_not_deleted("collections()")

        results: List[Any] = []
        async for subcollection_ref in self._doc_ref.collections():
            if names_only:
                results.append(subcollection_ref.id)
            else:
                results.append(self.collection(subcollection_ref.id))
        return results

    async def delete(
        self,
        batch: Optional[Any] = None,
        *,
        recursive: bool = True,
        batch_size: int = 50,
    ) -> None:
        """
        Delete the document from Firestore asynchronously.

        Args:
            batch: Optional batch object for batched deletes. If provided,
                  the delete will be accumulated in the batch (committed later).
            recursive: When True (default), delete all subcollections first.
            batch_size: Batch size to use for recursive subcollection cleanup.

        Raises:
            ValueError: If called on DETACHED object.
            RuntimeError: If called on DELETED object.
            ValueError: If recursive deletion is requested while using a batch.

        State Transitions:
            ATTACHED -> DELETED
            LOADED -> DELETED

        Example:
            user = db.doc('users/alovelace')
            await user.delete()

            # Batch delete
            batch = db.batch()
            user1.delete(batch=batch, recursive=False)
            user2.delete(batch=batch, recursive=False)
            await batch.commit()  # Commit all operations
        """
        if recursive:
            # Recursive cleanup issues its own commits, which cannot be
            # folded into a caller-provided batch.
            if batch is not None:
                raise ValueError("Cannot delete recursively as part of a batch.")
            if batch_size <= 0:
                raise ValueError(f"batch_size must be positive, got {batch_size}")
            await self._delete_descendant_collections(batch_size=batch_size)

        self._prepare_delete()
        await self._write_delete(batch=batch)
        self._transition_to_deleted()

    async def _delete_descendant_collections(self, batch_size: int) -> None:
        """Delete all subcollections beneath this document asynchronously."""
        names = await self.collections(names_only=True)
        for name in names:
            subcollection = self.collection(name)
            await subcollection.delete_all(batch_size=batch_size, recursive=True)

    # =========================================================================
    # Subcollection Utilities
    # =========================================================================

    async def delete_subcollection(
        self,
        name: str,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete a subcollection beneath this document asynchronously.

        Args:
            name: Subcollection name relative to this document.
            batch_size: Maximum number of deletes per commit.
            recursive: Whether to delete nested subcollections.
            dry_run: Count affected documents without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections.
        """
        subcollection = self.collection(name)
        return await subcollection.delete_all(
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
        )

    # =========================================================================
    # Factory Methods
    # =========================================================================

    @classmethod
    def from_snapshot(
        cls,
        snapshot: DocumentSnapshot,
        parent_collection: Optional[Any] = None,
        sync_client: Optional[Any] = None
    ) -> 'AsyncFireObject':
        """
        Create an AsyncFireObject from a DocumentSnapshot.

        Args:
            snapshot: DocumentSnapshot from native async API.
            parent_collection: Optional parent collection reference.
            sync_client: Optional sync Firestore client for async lazy loading.

        Returns:
            AsyncFireObject in LOADED state.

        Raises:
            ValueError: If snapshot doesn't exist.

        Example:
            async for doc in query.stream():
                user = AsyncFireObject.from_snapshot(doc)
        """
        init_data = cls._create_from_snapshot_base(snapshot, parent_collection, sync_client)

        obj = cls(
            doc_ref=init_data['doc_ref'],
            initial_state=init_data['initial_state'],
            parent_collection=init_data['parent_collection'],
            sync_client=sync_client
        )

        # Populate the data cache directly, bypassing dirty tracking.
        object.__setattr__(obj, '_data', init_data['data'])
        # Dirty tracking is already cleared by __init__ and _transition_to_loaded

        return obj

__getattr__(name)

Handle attribute access for document fields with lazy loading.

This method implements lazy loading: if the object is in ATTACHED state, accessing any data attribute will automatically trigger a synchronous fetch to load the data from Firestore using a companion sync client.

This fetch happens once per object - after the first attribute access, the object transitions to LOADED state and subsequent accesses are instant dict lookups.

Args: name: The attribute name being accessed.

Returns: The value of the field from the internal _data cache.

Raises: AttributeError: If the attribute doesn't exist in _data after fetching (if necessary). NotFound: If document doesn't exist in Firestore (during lazy load).

State Transitions: ATTACHED -> LOADED: Automatically fetches data on first access.

Example: user = db.doc('users/alovelace') # ATTACHED name = user.name # Triggers sync fetch, transitions to LOADED year = user.year # No fetch needed, already LOADED

Source code in src/fire_prox/async_fire_object.py
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
def __getattr__(self, name: str) -> Any:
    """
    Handle attribute access for document fields with lazy loading.

    This method implements lazy loading: if the object is in ATTACHED state,
    accessing any data attribute will automatically trigger a synchronous fetch
    to load the data from Firestore using a companion sync client.

    This fetch happens **once per object** - after the first attribute access,
    the object transitions to LOADED state and subsequent accesses are instant
    dict lookups.

    Args:
        name: The attribute name being accessed.

    Returns:
        The value of the field from the internal _data cache.

    Raises:
        AttributeError: If the attribute doesn't exist in _data after
                       fetching (if necessary).
        NotFound: If document doesn't exist in Firestore (during lazy load).

    State Transitions:
        ATTACHED -> LOADED: Automatically fetches data on first access.

    Example:
        user = db.doc('users/alovelace')  # ATTACHED
        name = user.name  # Triggers sync fetch, transitions to LOADED
        year = user.year  # No fetch needed, already LOADED
    """
    # Internal bookkeeping names are never lazily loaded; reaching here
    # means they were simply never set on this instance.
    if name in self._INTERNAL_ATTRS:
        raise AttributeError(f"Internal attribute {name} not set")

    # If we're in ATTACHED state, trigger lazy loading via sync fetch
    if self._state == State.ATTACHED and self._sync_doc_ref:
        # Use sync doc ref for lazy loading (synchronous fetch)
        snapshot = self._sync_doc_ref.get()

        if not snapshot.exists:
            raise NotFound(f"Document {self._sync_doc_ref.path} does not exist")

        # Get data and convert special types (DocumentReference → FireObject, etc.)
        data = snapshot.to_dict() or {}
        converted_data = {}
        # NOTE(review): the hasattr guard below is redundant — this branch
        # already required self._sync_doc_ref to be truthy.
        sync_client = (
            self._sync_doc_ref._client
            if hasattr(self, '_sync_doc_ref') and self._sync_doc_ref
            else None
        )
        for key, value in data.items():
            converted_data[key] = self._convert_snapshot_value_for_retrieval(
                value,
                is_async=True,
                sync_client=sync_client,
            )

        # Transition to LOADED with converted data
        self._transition_to_loaded(converted_data)

    return self._materialize_field(name)

collections(names_only=False) async

List subcollections beneath this document asynchronously.

Args: names_only: When True, return collection IDs instead of wrappers.

Returns: List of subcollection names or AsyncFireCollection wrappers.

Source code in src/fire_prox/async_fire_object.py
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
async def collections(self, names_only: bool = False) -> List[Any]:
    """
    List subcollections beneath this document asynchronously.

    Args:
        names_only: When True, return collection IDs instead of wrappers.

    Returns:
        List of subcollection names or AsyncFireCollection wrappers.
    """
    # Both DETACHED and DELETED objects cannot own reachable subcollections.
    self._validate_not_detached("collections()")
    self._validate_not_deleted("collections()")

    collected: List[Any] = []
    async for sub_ref in self._doc_ref.collections():
        entry = sub_ref.id if names_only else self.collection(sub_ref.id)
        collected.append(entry)
    return collected

delete(batch=None, *, recursive=True, batch_size=50) async

Delete the document from Firestore asynchronously.

Args: batch: Optional batch object for batched deletes. If provided, the delete will be accumulated in the batch (committed later). recursive: When True (default), delete all subcollections first. batch_size: Batch size to use for recursive subcollection cleanup.

Raises: ValueError: If called on DETACHED object. RuntimeError: If called on DELETED object. ValueError: If recursive deletion is requested while using a batch.

State Transitions: ATTACHED -> DELETED LOADED -> DELETED

Example: user = db.doc('users/alovelace') await user.delete()

# Batch delete
batch = db.batch()
user1.delete(batch=batch, recursive=False)
user2.delete(batch=batch, recursive=False)
await batch.commit()  # Commit all operations
Source code in src/fire_prox/async_fire_object.py
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
async def delete(
    self,
    batch: Optional[Any] = None,
    *,
    recursive: bool = True,
    batch_size: int = 50,
) -> None:
    """
    Delete the document from Firestore asynchronously.

    Args:
        batch: Optional batch object; when given, the delete is queued on
              the batch and executed at commit time.
        recursive: When True (default), subcollections are removed first.
        batch_size: Commit size used while clearing subcollections.

    Raises:
        ValueError: If called on DETACHED object, if recursive deletion is
                   combined with a batch, or if batch_size is not positive.
        RuntimeError: If called on DELETED object.

    State Transitions:
        ATTACHED -> DELETED
        LOADED -> DELETED

    Example:
        user = db.doc('users/alovelace')
        await user.delete()

        # Batch delete
        batch = db.batch()
        user1.delete(batch=batch, recursive=False)
        user2.delete(batch=batch, recursive=False)
        await batch.commit()  # Commit all operations
    """
    if recursive:
        # Recursive cleanup issues its own commits, which cannot be
        # folded into a caller-supplied batch.
        if batch is not None:
            raise ValueError("Cannot delete recursively as part of a batch.")
        if batch_size <= 0:
            raise ValueError(f"batch_size must be positive, got {batch_size}")
        await self._delete_descendant_collections(batch_size=batch_size)

    # Validate state, perform (or queue) the delete, then flip to DELETED.
    self._prepare_delete()
    await self._write_delete(batch=batch)
    self._transition_to_deleted()

delete_subcollection(name, *, batch_size=50, recursive=True, dry_run=False) async

Delete a subcollection beneath this document asynchronously.

Args: name: Subcollection name relative to this document. batch_size: Maximum number of deletes per commit. recursive: Whether to delete nested subcollections. dry_run: Count affected documents without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections.

Source code in src/fire_prox/async_fire_object.py
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
async def delete_subcollection(
    self,
    name: str,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Asynchronously remove a named subcollection under this document.

    Delegates all of the work to the subcollection's ``delete_all`` method.

    Args:
        name: Name of the subcollection directly beneath this document.
        batch_size: Upper bound on deletes committed per batch.
        recursive: If True, nested subcollections are removed as well.
        dry_run: When True, only count affected documents; no writes occur.

    Returns:
        Dictionary with counts for deleted documents and subcollections.
    """
    target = self.collection(name)
    outcome = await target.delete_all(
        batch_size=batch_size,
        recursive=recursive,
        dry_run=dry_run,
    )
    return outcome

fetch(force=False, transaction=None) async

Fetch document data from Firestore asynchronously.

Args: force: If True, fetch data even if already LOADED. transaction: Optional transaction object for transactional reads.

Returns: Self, to allow method chaining.

Raises: ValueError: If called on DETACHED object. RuntimeError: If called on DELETED object. NotFound: If document doesn't exist.

State Transitions: ATTACHED -> LOADED LOADED -> LOADED (if force=True)

Example: # Normal fetch user = db.doc('users/alovelace') # ATTACHED await user.fetch() # Now LOADED

# Transactional fetch
transaction = db.transaction()
@firestore.async_transactional
async def read_user(transaction):
    await user.fetch(transaction=transaction)
    return user.credits
credits = await read_user(transaction)
Source code in src/fire_prox/async_fire_object.py
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
async def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'AsyncFireObject':
    """
    Load this document's data from Firestore asynchronously.

    Args:
        force: When True, re-fetch even if the object is already LOADED.
        transaction: Optional transaction object for transactional reads.

    Returns:
        Self, enabling method chaining.

    Raises:
        ValueError: If the object is DETACHED.
        RuntimeError: If the object is DELETED.
        NotFound: If the document does not exist.

    State Transitions:
        ATTACHED -> LOADED
        LOADED -> LOADED (when force=True)

    Example:
        # Plain fetch
        user = db.doc('users/alovelace')  # ATTACHED
        await user.fetch()                # now LOADED

        # Transactional fetch
        transaction = db.transaction()
        @firestore.async_transactional
        async def read_user(transaction):
            await user.fetch(transaction=transaction)
            return user.credits
        credits = await read_user(transaction)
    """
    # Skip the round trip entirely when the cached state is good enough.
    if not self._should_skip_fetch(force):
        snapshot = await self._get_snapshot(transaction)
        self._process_snapshot(snapshot, is_async=True)
    return self

from_snapshot(snapshot, parent_collection=None, sync_client=None) classmethod

Create an AsyncFireObject from a DocumentSnapshot.

Args: snapshot: DocumentSnapshot from native async API. parent_collection: Optional parent collection reference. sync_client: Optional sync Firestore client for async lazy loading.

Returns: AsyncFireObject in LOADED state.

Raises: ValueError: If snapshot doesn't exist.

Example: async for doc in query.stream(): user = AsyncFireObject.from_snapshot(doc)

Source code in src/fire_prox/async_fire_object.py
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
@classmethod
def from_snapshot(
    cls,
    snapshot: DocumentSnapshot,
    parent_collection: Optional[Any] = None,
    sync_client: Optional[Any] = None
) -> 'AsyncFireObject':
    """
    Build an AsyncFireObject in LOADED state from a DocumentSnapshot.

    Args:
        snapshot: DocumentSnapshot produced by the native async API.
        parent_collection: Optional parent collection reference.
        sync_client: Optional sync Firestore client for async lazy loading.

    Returns:
        AsyncFireObject in LOADED state.

    Raises:
        ValueError: If the snapshot refers to a nonexistent document.

    Example:
        async for doc in query.stream():
            user = AsyncFireObject.from_snapshot(doc)
    """
    seed = cls._create_from_snapshot_base(snapshot, parent_collection, sync_client)

    instance = cls(
        doc_ref=seed['doc_ref'],
        initial_state=seed['initial_state'],
        parent_collection=seed['parent_collection'],
        sync_client=sync_client
    )

    # Inject the snapshot data directly; __init__ and _transition_to_loaded
    # have already reset the dirty-tracking bookkeeping.
    object.__setattr__(instance, '_data', seed['data'])

    return instance

save(doc_id=None, transaction=None, batch=None) async

Save the object's data to Firestore asynchronously.

Args: doc_id: Optional custom document ID for DETACHED objects. transaction: Optional transaction object for transactional writes. batch: Optional batch object for batched writes. If provided, the write will be accumulated in the batch (committed later).

Returns: Self, to allow method chaining.

Raises: RuntimeError: If called on DELETED object. ValueError: If DETACHED without parent_collection, or if trying to create a new document within a transaction or batch.

State Transitions: DETACHED -> LOADED (creates new document) LOADED -> LOADED (updates if dirty)

Example: # Normal save user = collection.new() user.name = 'Ada' await user.save(doc_id='alovelace')

# Transactional save
transaction = db.transaction()
@firestore.async_transactional
async def update_user(transaction):
    await user.fetch(transaction=transaction)
    user.credits += 10
    await user.save(transaction=transaction)
await update_user(transaction)

# Batch save
batch = db.batch()
await user1.save(batch=batch)
await user2.save(batch=batch)
await batch.commit()  # Commit all operations
Source code in src/fire_prox/async_fire_object.py
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
async def save(
    self,
    doc_id: Optional[str] = None,
    transaction: Optional[Any] = None,
    batch: Optional[Any] = None,
) -> 'AsyncFireObject':
    """
    Persist this object's data to Firestore asynchronously.

    Args:
        doc_id: Optional explicit document ID when saving a DETACHED object.
        transaction: Optional transaction object for transactional writes.
        batch: Optional batch object; when given, the write is queued on
              the batch and executed at batch.commit() time.

    Returns:
        Self, enabling method chaining.

    Raises:
        RuntimeError: If the object is DELETED.
        ValueError: If DETACHED without a parent_collection, or when a
                   new-document creation is attempted inside a transaction
                   or batch.

    State Transitions:
        DETACHED -> LOADED (creates a new document)
        LOADED -> LOADED (writes an update only when dirty)

    Example:
        # Plain save
        user = collection.new()
        user.name = 'Ada'
        await user.save(doc_id='alovelace')

        # Transactional save
        transaction = db.transaction()
        @firestore.async_transactional
        async def update_user(transaction):
            await user.fetch(transaction=transaction)
            user.credits += 10
            await user.save(transaction=transaction)
        await update_user(transaction)

        # Batch save
        batch = db.batch()
        await user1.save(batch=batch)
        await user2.save(batch=batch)
        await batch.commit()  # Commit all operations
    """
    self._validate_not_deleted("save()")

    state = self._state

    if state == State.LOADED:
        # Only touch the server when local edits exist.
        if not self.is_dirty():
            return self
        await self._write_update(
            self._build_update_dict(), transaction=transaction, batch=batch
        )
        self._mark_clean()
        return self

    if state == State.DETACHED:
        # Creates the document reference (and validates doc_id) first.
        doc_ref, payload = self._prepare_detached_save(doc_id, transaction, batch)
        await self._write_set(payload, doc_ref=doc_ref)
    elif state == State.ATTACHED:
        payload = self._prepare_data_for_storage()
        await self._write_set(payload, transaction=transaction, batch=batch)
    else:
        # Unknown state: leave the object untouched.
        return self

    # Shared tail for the two set-based paths above.
    object.__setattr__(self, '_state', State.LOADED)
    self._mark_clean()
    return self

async_fire_query

AsyncFireQuery: Chainable query builder for Firestore (asynchronous).

This module provides the asynchronous AsyncFireQuery class, which wraps native Firestore AsyncQuery objects and provides a chainable interface for building and executing async queries.

AsyncFireQuery

A chainable query builder for Firestore collections (asynchronous).

AsyncFireQuery wraps the native google-cloud-firestore AsyncQuery object and provides a simplified, chainable interface for building and executing async queries. It follows an immutable pattern - each method returns a new AsyncFireQuery instance with the modified query.

This is the asynchronous implementation. For sync queries, use FireQuery.

Usage Examples: # Basic filtering query = users.where('birth_year', '>', 1800) results = await query.get() for user in results: print(user.name)

# Chaining multiple conditions
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England')
         .order_by('birth_year')
         .limit(10))
async for user in query.stream():
    print(f"{user.name} - {user.birth_year}")

# Async iteration
async for user in users.where('active', '==', True).stream():
    print(user.name)

Design Note: For complex queries beyond the scope of this builder (e.g., OR queries, advanced filtering), use the native AsyncQuery API directly and hydrate results with AsyncFireObject.from_snapshot():

    native_query = client.collection('users').where(...)
    results = [AsyncFireObject.from_snapshot(snap) async for snap in native_query.stream()]
Source code in src/fire_prox/async_fire_query.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
class AsyncFireQuery:
    """
    A chainable query builder for Firestore collections (asynchronous).

    AsyncFireQuery wraps the native google-cloud-firestore AsyncQuery object and
    provides a simplified, chainable interface for building and executing async
    queries. It follows an immutable pattern - each method returns a new
    AsyncFireQuery instance with the modified query.

    This is the asynchronous implementation. For sync queries, use FireQuery.

    Usage Examples:
        # Basic filtering
        query = users.where('birth_year', '>', 1800)
        results = await query.get()
        for user in results:
            print(user.name)

        # Chaining multiple conditions
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England')
                 .order_by('birth_year')
                 .limit(10))
        async for user in query.stream():
            print(f"{user.name} - {user.birth_year}")

        # Async iteration
        async for user in users.where('active', '==', True).stream():
            print(user.name)

    Design Note:
        For complex queries beyond the scope of this builder (e.g., OR queries,
        advanced filtering), use the native AsyncQuery API directly and hydrate
        results with AsyncFireObject.from_snapshot():

            native_query = client.collection('users').where(...)
            results = [AsyncFireObject.from_snapshot(snap) async for snap in native_query.stream()]
    """

    def __init__(
        self,
        native_query: AsyncQuery,
        parent_collection: Optional[Any] = None,
        projection: Optional[tuple] = None,
    ):
        """
        Create an AsyncFireQuery wrapping a native AsyncQuery.

        Args:
            native_query: Underlying AsyncQuery from google-cloud-firestore.
            parent_collection: Optional owning AsyncFireCollection reference.
            projection: Optional tuple of field paths chosen via select().
        """
        # Store collaborators verbatim; instances are treated as immutable,
        # so builder methods always construct a fresh AsyncFireQuery.
        self._projection = projection
        self._parent_collection = parent_collection
        self._query = native_query

    # =========================================================================
    # Query Building Methods (Immutable Pattern)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
        """
        Add a filter condition, returning a new query (immutable pattern).

        Args:
            field: Field path to filter on (e.g., 'name', 'address.city').
            op: Comparison operator: '==', '!=', '<', '<=', '>', '>=',
                'in', 'not-in', 'array-contains', 'array-contains-any'.
            value: Value to compare the field against.

        Returns:
            A new AsyncFireQuery carrying the additional filter.

        Example:
            query = users.where('birth_year', '>', 1800)
            query = (users
                     .where('birth_year', '>', 1800)
                     .where('country', '==', 'England'))
        """
        # Wrap the condition in a FieldFilter and push it onto the query.
        condition = FieldFilter(field, op, value)
        return AsyncFireQuery(
            self._query.where(filter=condition),
            self._parent_collection,
            self._projection,
        )

    def order_by(self, field: str, direction: str = 'ASCENDING') -> 'AsyncFireQuery':
        """
        Add an ordering clause, returning a new query (immutable pattern).

        Args:
            field: Field path to order by.
            direction: 'ASCENDING' (default) or 'DESCENDING', case-insensitive.

        Returns:
            A new AsyncFireQuery with the ordering applied.

        Raises:
            ValueError: If direction is not a recognized sort direction.

        Example:
            query = users.order_by('birth_year')
            query = users.order_by('birth_year', direction='DESCENDING')
            query = (users
                     .order_by('country')
                     .order_by('birth_year', direction='DESCENDING'))
        """
        normalized = direction.upper()
        if normalized not in ('ASCENDING', 'DESCENDING'):
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        # Import locally (matching the original) and map the validated
        # string onto the native Query constant.
        from google.cloud.firestore_v1 import Query as QueryClass
        resolved = (
            QueryClass.ASCENDING if normalized == 'ASCENDING' else QueryClass.DESCENDING
        )

        return AsyncFireQuery(
            self._query.order_by(field, direction=resolved),
            self._parent_collection,
            self._projection,
        )

    def limit(self, count: int) -> 'AsyncFireQuery':
        """
        Cap the number of results, returning a new query (immutable pattern).

        Args:
            count: Maximum number of documents to return; must be positive.

        Returns:
            A new AsyncFireQuery with the limit applied.

        Raises:
            ValueError: If count is zero or negative.

        Example:
            query = users.order_by('score', direction='DESCENDING').limit(10)
            query = users.where('active', '==', True).limit(5)
        """
        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")
        return AsyncFireQuery(
            self._query.limit(count), self._parent_collection, self._projection
        )

    def start_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        Begin results at a cursor position, inclusive of the cursor itself.

        Args:
            *document_fields_or_snapshot: A dict of field values keyed to the
                order_by fields, a DocumentSnapshot from a prior query, or
                positional field values in order_by order.

        Returns:
            A new AsyncFireQuery with the start cursor applied.

        Example:
            # Field-value cursor (requires matching order_by)
            query = users.order_by('age').start_at({'age': 25})

            # Pagination from the last document of the previous page
            page1 = await users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = await users.order_by('age').start_at({'age': last_age}).limit(10).get()

            # Snapshot cursor
            last_doc_ref = page1[-1]._doc_ref
            last_snapshot = await last_doc_ref.get()
            page2 = await users.order_by('age').start_at(last_snapshot).limit(10).get()
        """
        anchored = self._query.start_at(*document_fields_or_snapshot)
        return AsyncFireQuery(anchored, self._parent_collection, self._projection)

    def start_after(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        Begin results after a cursor position, excluding the cursor itself.

        The cursor document is left out of the results, which makes this the
        usual choice for pagination (it avoids repeating the previous page's
        last document).

        Args:
            *document_fields_or_snapshot: A dict of field values keyed to the
                order_by fields, a DocumentSnapshot from a prior query, or
                positional field values in order_by order.

        Returns:
            A new AsyncFireQuery with the start-after cursor applied.

        Example:
            page1 = await users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = await users.order_by('age').start_after({'age': last_age}).limit(10).get()

            # Snapshot cursor (common pattern)
            last_doc_ref = page1[-1]._doc_ref
            last_snapshot = await last_doc_ref.get()
            page2 = await users.order_by('age').start_after(last_snapshot).limit(10).get()
        """
        anchored = self._query.start_after(*document_fields_or_snapshot)
        return AsyncFireQuery(anchored, self._parent_collection, self._projection)

    def end_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        End results at a cursor position, inclusive of the cursor itself.

        Args:
            *document_fields_or_snapshot: A dict of field values, a
                DocumentSnapshot, or positional field values in order_by order.

        Returns:
            A new AsyncFireQuery with the end cursor applied.

        Example:
            # Everyone up to and including age 50
            query = users.order_by('age').end_at({'age': 50})

            # A specific document as the endpoint
            target_doc_ref = users.doc('user123')._doc_ref
            target_snapshot = await target_doc_ref.get()
            query = users.order_by('age').end_at(target_snapshot)
        """
        bounded = self._query.end_at(*document_fields_or_snapshot)
        return AsyncFireQuery(bounded, self._parent_collection, self._projection)

    def end_before(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
        """
        End results before a cursor position, excluding the cursor itself.

        Args:
            *document_fields_or_snapshot: A dict of field values, a
                DocumentSnapshot, or positional field values in order_by order.

        Returns:
            A new AsyncFireQuery with the end-before cursor applied.

        Example:
            # Everyone strictly younger than 50
            query = users.order_by('age').end_before({'age': 50})

            # A specific document as the exclusive endpoint
            target_doc_ref = users.doc('user123')._doc_ref
            target_snapshot = await target_doc_ref.get()
            query = users.order_by('age').end_before(target_snapshot)
        """
        bounded = self._query.end_before(*document_fields_or_snapshot)
        return AsyncFireQuery(bounded, self._parent_collection, self._projection)

    def select(self, *field_paths: str) -> 'AsyncFireQuery':
        """
        Restrict results to specific fields (projection).

        Projection queries yield plain dictionaries instead of AsyncFireObject
        instances, containing only the selected fields. DocumentReference
        values inside those dictionaries are automatically converted to
        AsyncFireObject instances in ATTACHED state.

        Args:
            *field_paths: One or more field paths; nested fields use dot
                         notation (e.g., 'address.city').

        Returns:
            A new AsyncFireQuery with the projection applied.

        Raises:
            ValueError: If called with no field paths.

        Example:
            # Single field
            results = await users.select('name').get()
            # -> [{'name': 'Alice'}, {'name': 'Bob'}, ...]

            # Several fields, combined with filters and ordering
            query = (users
                     .where('birth_year', '>', 1990)
                     .select('name', 'birth_year')
                     .order_by('birth_year')
                     .limit(10))

            # DocumentReferences come back as ATTACHED AsyncFireObjects
            results = await posts.select('title', 'author').get()
            await results[0]['author'].fetch()
            print(results[0]['author'].name)

        Note:
            Projections are more bandwidth-efficient for large documents,
            since only the selected fields are transferred.
        """
        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # Remember the projection so result hydration knows to emit dicts.
        projected = self._query.select(list(field_paths))
        return AsyncFireQuery(projected, self._parent_collection, projection=field_paths)

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'AsyncFireQuery':
        """
        Run a vector similarity (nearest-neighbor) search on this query.

        The search is applied on top of any filters already on the query, so
        pre-filtering can be combined with vector search (this requires a
        composite index).

        Args:
            vector_field: Field holding the vector embeddings.
            query_vector: Vector to compare against
                (google.cloud.firestore_v1.vector.Vector).
            distance_measure: DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE,
                or DistanceMeasure.DOT_PRODUCT.
            limit: Maximum number of neighbors to return (max 1000).
            distance_result_field: Optional result field that receives the
                computed distance.

        Returns:
            A new AsyncFireQuery with the vector search applied.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            query = (collection
                     .where('category', '==', 'tech')
                     .find_nearest(
                         vector_field="embedding",
                         query_vector=Vector([0.1, 0.2, 0.3]),
                         distance_measure=DistanceMeasure.COSINE,
                         limit=5
                     ))
            async for doc in query.stream():
                print(f"{doc.title}: {doc.category}")

        Note:
            - Combining with where() clauses requires a composite index
            - The limit may not exceed 1000 documents
            - Not supported by the Firestore emulator (production only)
        """
        # Delegate straight to the native vector-search builder.
        vector_query = self._query.find_nearest(
            vector_field=vector_field,
            query_vector=query_vector,
            distance_measure=distance_measure,
            limit=limit,
            distance_result_field=distance_result_field,
        )
        return AsyncFireQuery(vector_query, self._parent_collection, self._projection)

    # =========================================================================
    # Aggregation Methods
    # =========================================================================

    async def count(self) -> int:
        """
        Count the documents matching this query via a server-side aggregation.

        No document contents are fetched; Firestore's aggregation API returns
        only the count, which is far cheaper than streaming the documents.

        Returns:
            Number of matching documents; 0 when nothing matches.

        Example:
            total_users = await users.count()
            active_users = await users.where('active', '==', True).count()
            count = await (users
                          .where('age', '>', 25)
                          .where('country', '==', 'USA')
                          .count())

        Note:
            Aggregations are billed per batch of matched index entries, so a
            count is not entirely free — but it is much cheaper than reading
            every document.
        """
        # Build the aggregation and await its (single-row) result set.
        aggregation = self._query.count(alias='count')
        rows = await aggregation.get()

        if rows:
            # One aggregation alias -> the first value of the first row.
            for row in rows:
                return row[0].value
        return 0

    async def sum(self, field: str) -> Union[int, float]:
        """
        Sum a numeric field across all matching documents.

        Executes an aggregation query to sum the values of a specific field
        without fetching the actual documents. The field must contain numeric
        values (int or float).

        Args:
            field: Name of the numeric field to sum.

        Returns:
            Sum of the field values across all matching documents.
            Returns 0 if no documents match or if all values are null.

        Raises:
            ValueError: If field is None or empty.

        Example:
            total_salary = await employees.sum('salary')
            eng_salary = await (employees
                                .where('department', '==', 'Engineering')
                                .sum('salary'))

        Note:
            - Null values are ignored in the sum
            - Non-numeric values will cause an error
            - This is more efficient than fetching all documents
        """
        if not field:
            raise ValueError("sum() requires a field name")

        # Create async aggregation query using AsyncQuery's sum method
        agg_query = self._query.sum(field, alias='sum')

        # Execute and extract result (await the async get method)
        result = await agg_query.get()
        if result and len(result) > 0:
            # Extract sum from first (and only) aggregation result
            for agg_result in result:
                value = agg_result[0].value
                # Normalize a null aggregate to 0 so the documented contract
                # ("Returns 0 ... if all values are null") holds, matching
                # the None handling already done by avg().
                return value if value is not None else 0
        return 0

    async def avg(self, field: str) -> float:
        """
        Compute the arithmetic mean of a numeric field over matching documents.

        Performs a server-side aggregation, so no document data is fetched.
        The target field must hold numeric (int or float) values; nulls are
        ignored by the server.

        Args:
            field: Name of the numeric field to average.

        Returns:
            Mean of the field values, or 0.0 when no documents match or
            every value is null.

        Raises:
            ValueError: If field is None or empty.

        Example:
            avg_age = await users.avg('age')
            avg_salary = await (employees
                                .where('department', '==', 'Engineering')
                                .avg('salary'))
        """
        if not field:
            raise ValueError("avg() requires a field name")

        # Build and run the aggregation on the native AsyncQuery.
        rows = await self._query.avg(field, alias='avg').get()

        # Single aggregation => first cell of the first result row.
        for row in rows or ():
            mean = row[0].value
            # The server reports None when every value was null.
            return 0.0 if mean is None else mean
        return 0.0

    async def aggregate(self, **aggregations) -> Dict[str, Any]:
        """
        Perform multiple aggregations in a single query.

        Executes an aggregation query with multiple aggregation operations
        (count, sum, average) without fetching the actual documents. This is
        more efficient than running multiple separate aggregation queries.

        Args:
            **aggregations: Named aggregations using Count(), Sum(field), or
                          Avg(field) from fire_prox.aggregation module.

        Returns:
            Dictionary mapping aggregation names to their results.

        Raises:
            ValueError: If no aggregations are provided or if invalid
                       aggregation types are used.

        Example:
            from fire_prox.aggregation import Count, Sum, Avg

            stats = await employees.aggregate(
                total_count=Count(),
                total_salary=Sum('salary'),
                avg_salary=Avg('salary'),
            )
            # Returns: {'total_count': 150, 'total_salary': 15000000,
            #           'avg_salary': 100000.0}

            eng_stats = await (employees
                              .where('department', '==', 'Engineering')
                              .aggregate(count=Count(),
                                         total_salary=Sum('salary')))

        Note:
            - Much more efficient than multiple separate aggregation queries
            - All aggregations execute in a single round-trip to Firestore
            - Null values are ignored in sum and average calculations
        """
        if not aggregations:
            raise ValueError("aggregate() requires at least one aggregation")

        from .aggregation import Avg, Count, Sum

        # Build the aggregation query incrementally. The first builder call
        # goes to the underlying AsyncQuery (yielding an
        # AsyncAggregationQuery); subsequent calls chain on the aggregation
        # query itself. Both objects expose identical count()/sum()/avg()
        # builder methods, so one loop covers every aggregation and avoids
        # duplicating the dispatch logic for the first entry.
        agg_query = self._query
        for alias, agg_type in aggregations.items():
            if isinstance(agg_type, Count):
                agg_query = agg_query.count(alias=alias)
            elif isinstance(agg_type, Sum):
                if not agg_type.field:
                    raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
                agg_query = agg_query.sum(agg_type.field, alias=alias)
            elif isinstance(agg_type, Avg):
                if not agg_type.field:
                    raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
                agg_query = agg_query.avg(agg_type.field, alias=alias)
            else:
                raise ValueError(
                    f"Invalid aggregation type for '{alias}': {type(agg_type).__name__}. "
                    f"Use Count(), Sum(field), or Avg(field)"
                )

        # Execute and extract results (await the async get method)
        results_dict = {}
        result = await agg_query.get()

        if result and len(result) > 0:
            for agg_result in result:
                # Extract all aggregation results by matching aliases
                for agg in agg_result:
                    value = agg.value
                    # Convert None to 0 for consistency
                    results_dict[agg.alias] = value if value is not None else 0

        return results_dict

    # =========================================================================
    # Helper Methods
    # =========================================================================

    def _convert_projection_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Replace DocumentReferences in projection data with AsyncFireObjects.

        Walks the dictionary recursively (including nested dicts and lists)
        and turns every DocumentReference / AsyncDocumentReference into an
        AsyncFireObject in ATTACHED state, so callers can use the FireProx
        API on projected reference fields. Primitive values pass through
        unchanged.

        Args:
            data: Projection dictionary as returned by Firestore.

        Returns:
            A new dictionary with all references wrapped as AsyncFireObjects.
        """
        from .state import State

        ref_types = (DocumentReference, AsyncDocumentReference)

        def wrap(ref):
            # Wrap a native reference as an ATTACHED AsyncFireObject.
            return AsyncFireObject(
                doc_ref=ref,
                initial_state=State.ATTACHED,
                parent_collection=self._parent_collection
            )

        converted = {}
        for key, value in data.items():
            if isinstance(value, ref_types):
                converted[key] = wrap(value)
            elif isinstance(value, list):
                # Lists may mix references, nested dicts, and primitives.
                converted[key] = [
                    wrap(item) if isinstance(item, ref_types)
                    else self._convert_projection_data(item) if isinstance(item, dict)
                    else item
                    for item in value
                ]
            elif isinstance(value, dict):
                # Recurse into nested maps.
                converted[key] = self._convert_projection_data(value)
            else:
                converted[key] = value
        return converted

    # =========================================================================
    # Query Execution Methods
    # =========================================================================

    async def get(self) -> Union[List[AsyncFireObject], List[Dict[str, Any]]]:
        """
        Execute the query and collect all results into a list.

        Streams every matching document asynchronously. Without a projection,
        each snapshot is hydrated into an AsyncFireObject in LOADED state.
        When a projection is active (via .select()), plain dictionaries of
        the selected fields are returned instead, with any
        DocumentReferences converted to AsyncFireObjects.

        Returns:
            List of AsyncFireObject instances, or list of dictionaries when
            a projection is active. Empty list when nothing matches.

        Example:
            users = await query.get()
            for user in users:
                print(user.name)

            # Projected results come back as dicts
            rows = await query.select('name', 'email').get()
            for row in rows:
                print(row['name'], row['email'])
        """
        # Projection path: yield plain dicts of the selected fields.
        if self._projection:
            projected = []
            async for snapshot in self._query.stream():
                projected.append(self._convert_projection_data(snapshot.to_dict()))
            return projected

        # Default path: hydrate full AsyncFireObjects.
        hydrated = []
        async for snapshot in self._query.stream():
            hydrated.append(
                AsyncFireObject.from_snapshot(snapshot, self._parent_collection)
            )
        return hydrated

    async def stream(self) -> Union[AsyncIterator[AsyncFireObject], AsyncIterator[Dict[str, Any]]]:
        """
        Execute the query and yield results one at a time.

        Memory-efficient alternative to .get() for large result sets: each
        matching document is yielded as it arrives instead of being
        collected into a list. Without a projection, yields AsyncFireObject
        instances in LOADED state; with a projection (via .select()), yields
        dictionaries of the selected fields with DocumentReferences
        converted to AsyncFireObjects.

        Yields:
            AsyncFireObject instances, or dictionaries when a projection
            is active.

        Example:
            async for user in query.stream():
                print(user.name)

            async for row in query.select('name', 'email').stream():
                print(row['name'], row['email'])
        """
        # Decide the output shape once; the projection does not change
        # while we iterate (mirrors the original single up-front check).
        project = bool(self._projection)
        async for snapshot in self._query.stream():
            if project:
                yield self._convert_projection_data(snapshot.to_dict())
            else:
                yield AsyncFireObject.from_snapshot(snapshot, self._parent_collection)

    # =========================================================================
    # Real-Time Listeners (Sync-only via sync_client)
    # =========================================================================

    def on_snapshot(self, callback: Any) -> Any:
        """
        Register a real-time listener for this query.

        The callback fires whenever any document matching the query changes.
        The Firestore SDK runs the listener on its own background thread.

        **Important**: This is a sync-only feature even on AsyncFireQuery;
        real-time listeners in the Python SDK always run on a background
        thread rather than the asyncio event loop.

        Args:
            callback: Invoked as callback(query_snapshot, changes, read_time):
                     - query_snapshot: list of DocumentSnapshot objects
                     - changes: list of DocumentChange objects
                       (ADDED / MODIFIED / REMOVED)
                     - read_time: timestamp of the snapshot

        Returns:
            Watch object; call its `.unsubscribe()` to stop listening.

        Example:
            import threading

            done = threading.Event()

            def on_change(query_snapshot, changes, read_time):
                for change in changes:
                    print(change.type.name, change.document.id)
                done.set()

            watch = users.where('status', '==', 'active').on_snapshot(on_change)
            done.wait()
            watch.unsubscribe()

        Note:
            The callback runs on a separate thread; synchronize with your
            main thread using threading primitives (Event, Lock, Queue).
        """
        # Delegate straight to the native query; the SDK owns the
        # listener thread and its lifecycle.
        watch = self._query.on_snapshot(callback)
        return watch

    def __repr__(self) -> str:
        """Return a developer-oriented representation of this query."""
        return "<AsyncFireQuery query={}>".format(self._query)

    def __str__(self) -> str:
        """Return a human-readable description of this query."""
        return "AsyncFireQuery({})".format(self._query)

__init__(native_query, parent_collection=None, projection=None)

Initialize an AsyncFireQuery.

Args: native_query: The underlying native AsyncQuery object from google-cloud-firestore. parent_collection: Optional reference to parent AsyncFireCollection. projection: Optional tuple of field paths to project (select specific fields).

Source code in src/fire_prox/async_fire_query.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
def __init__(
    self,
    native_query: AsyncQuery,
    parent_collection: Optional[Any] = None,
    projection: Optional[tuple] = None,
):
    """
    Initialize an AsyncFireQuery.

    Args:
        native_query: The underlying native AsyncQuery object from google-cloud-firestore.
        parent_collection: Optional reference to parent AsyncFireCollection.
        projection: Optional tuple of field paths to project (select specific fields).
    """
    self._query = native_query
    self._parent_collection = parent_collection
    self._projection = projection

__repr__()

Return string representation of the query.

Source code in src/fire_prox/async_fire_query.py
903
904
905
def __repr__(self) -> str:
    """Return string representation of the query."""
    return f"<AsyncFireQuery query={self._query}>"

__str__()

Return human-readable string representation.

Source code in src/fire_prox/async_fire_query.py
907
908
909
def __str__(self) -> str:
    """Return human-readable string representation."""
    return f"AsyncFireQuery({self._query})"

aggregate(**aggregations) async

Perform multiple aggregations in a single query.

Executes an aggregation query with multiple aggregation operations (count, sum, average) without fetching the actual documents. This is more efficient than running multiple separate aggregation queries.

Args: **aggregations: Named aggregations using Count(), Sum(field), or Avg(field) from fire_prox.aggregation module.

Returns: Dictionary mapping aggregation names to their results.

Raises: ValueError: If no aggregations are provided or if invalid aggregation types are used.

Example: from fire_prox.aggregation import Count, Sum, Avg

# Multiple aggregations in one query
stats = await employees.aggregate(
    total_count=Count(),
    total_salary=Sum('salary'),
    avg_salary=Avg('salary'),
    avg_age=Avg('age')
)
# Returns: {
#     'total_count': 150,
#     'total_salary': 15000000,
#     'avg_salary': 100000.0,
#     'avg_age': 35.2
# }

# With filters
eng_stats = await (employees
                  .where('department', '==', 'Engineering')
                  .aggregate(
                      count=Count(),
                      total_salary=Sum('salary')
                  ))
# Returns: {'count': 50, 'total_salary': 5000000}

# Financial dashboard
financials = await (transactions
                   .where('date', '>=', start_date)
                   .aggregate(
                       total_transactions=Count(),
                       total_revenue=Sum('amount'),
                       avg_transaction=Avg('amount')
                   ))

Note: - Much more efficient than multiple separate aggregation queries - All aggregations execute in a single round-trip to Firestore - Null values are ignored in sum and average calculations

Source code in src/fire_prox/async_fire_query.py
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
async def aggregate(self, **aggregations) -> Dict[str, Any]:
    """
    Perform multiple aggregations in a single query.

    Executes an aggregation query with multiple aggregation operations
    (count, sum, average) without fetching the actual documents. This is
    more efficient than running multiple separate aggregation queries.

    Args:
        **aggregations: Named aggregations using Count(), Sum(field), or
                      Avg(field) from fire_prox.aggregation module.

    Returns:
        Dictionary mapping aggregation names to their results.

    Raises:
        ValueError: If no aggregations are provided or if invalid
                   aggregation types are used.

    Example:
        from fire_prox.aggregation import Count, Sum, Avg

        # Multiple aggregations in one query
        stats = await employees.aggregate(
            total_count=Count(),
            total_salary=Sum('salary'),
            avg_salary=Avg('salary'),
            avg_age=Avg('age')
        )
        # Returns: {
        #     'total_count': 150,
        #     'total_salary': 15000000,
        #     'avg_salary': 100000.0,
        #     'avg_age': 35.2
        # }

        # With filters
        eng_stats = await (employees
                          .where('department', '==', 'Engineering')
                          .aggregate(
                              count=Count(),
                              total_salary=Sum('salary')
                          ))
        # Returns: {'count': 50, 'total_salary': 5000000}

        # Financial dashboard
        financials = await (transactions
                           .where('date', '>=', start_date)
                           .aggregate(
                               total_transactions=Count(),
                               total_revenue=Sum('amount'),
                               avg_transaction=Avg('amount')
                           ))

    Note:
        - Much more efficient than multiple separate aggregation queries
        - All aggregations execute in a single round-trip to Firestore
        - Null values are ignored in sum and average calculations
    """
    if not aggregations:
        raise ValueError("aggregate() requires at least one aggregation")

    from .aggregation import Avg, Count, Sum

    # Start with the first aggregation to create the AsyncAggregationQuery
    first_alias, first_agg_type = next(iter(aggregations.items()))

    if isinstance(first_agg_type, Count):
        agg_query = self._query.count(alias=first_alias)
    elif isinstance(first_agg_type, Sum):
        if not first_agg_type.field:
            raise ValueError(f"Sum aggregation '{first_alias}' is missing a field name")
        agg_query = self._query.sum(first_agg_type.field, alias=first_alias)
    elif isinstance(first_agg_type, Avg):
        if not first_agg_type.field:
            raise ValueError(f"Avg aggregation '{first_alias}' is missing a field name")
        agg_query = self._query.avg(first_agg_type.field, alias=first_alias)
    else:
        raise ValueError(
            f"Invalid aggregation type for '{first_alias}': {type(first_agg_type).__name__}. "
            f"Use Count(), Sum(field), or Avg(field)"
        )

    # Add remaining aggregations
    remaining_items = list(aggregations.items())[1:]
    for alias, agg_type in remaining_items:
        if isinstance(agg_type, Count):
            agg_query = agg_query.count(alias=alias)
        elif isinstance(agg_type, Sum):
            if not agg_type.field:
                raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
            agg_query = agg_query.sum(agg_type.field, alias=alias)
        elif isinstance(agg_type, Avg):
            if not agg_type.field:
                raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
            agg_query = agg_query.avg(agg_type.field, alias=alias)
        else:
            raise ValueError(
                f"Invalid aggregation type for '{alias}': {type(agg_type).__name__}. "
                f"Use Count(), Sum(field), or Avg(field)"
            )

    # Execute and extract results (await the async get method)
    results_dict = {}
    result = await agg_query.get()

    if result and len(result) > 0:
        for agg_result in result:
            # Extract all aggregation results by matching aliases
            for agg in agg_result:
                value = agg.value
                # Convert None to 0 for consistency
                results_dict[agg.alias] = value if value is not None else 0

    return results_dict

avg(field) async

Average a numeric field across all matching documents.

Executes an aggregation query to calculate the arithmetic mean of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to average.

Returns: Average of the field values across all matching documents. Returns 0.0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Average age of all users avg_age = await users.avg('age') # Returns: 32.5

# Average with filters
avg_salary = await (employees
                   .where('department', '==', 'Engineering')
                   .avg('salary'))
# Returns: 125000.0

# Average rating for active products
avg_rating = await (products
                   .where('active', '==', True)
                   .avg('rating'))
# Returns: 4.2

Note: - Null values are ignored in the average calculation - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/async_fire_query.py
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
async def avg(self, field: str) -> float:
    """
    Average a numeric field across all matching documents.

    Executes an aggregation query to calculate the arithmetic mean of a
    specific field without fetching the actual documents. The field must
    contain numeric values (int or float).

    Args:
        field: Name of the numeric field to average.

    Returns:
        Average of the field values across all matching documents.
        Returns 0.0 if no documents match or if all values are null.

    Raises:
        ValueError: If field is None or empty.

    Example:
        # Average age of all users
        avg_age = await users.avg('age')
        # Returns: 32.5

        # Average with filters
        avg_salary = await (employees
                           .where('department', '==', 'Engineering')
                           .avg('salary'))
        # Returns: 125000.0

        # Average rating for active products
        avg_rating = await (products
                           .where('active', '==', True)
                           .avg('rating'))
        # Returns: 4.2

    Note:
        - Null values are ignored in the average calculation
        - Non-numeric values will cause an error
        - This is more efficient than fetching all documents
    """
    if not field:
        raise ValueError("avg() requires a field name")

    # Create async aggregation query using AsyncQuery's avg method
    agg_query = self._query.avg(field, alias='avg')

    # Execute and extract result (await the async get method)
    result = await agg_query.get()
    if result and len(result) > 0:
        # Extract average from first (and only) aggregation result
        for agg_result in result:
            value = agg_result[0].value
            return value if value is not None else 0.0
    return 0.0

count() async

Count documents matching the query.

Executes an aggregation query to count the number of documents that match the current query filters without fetching the actual documents. This is more efficient than fetching all documents and counting them.

Returns: Integer count of matching documents. Returns 0 if no documents match.

Example: # Count all users total_users = await users.count() # Returns: 150

# Count with filters
active_users = await users.where('active', '==', True).count()
# Returns: 42

# Count with complex query
count = await (users
              .where('age', '>', 25)
              .where('country', '==', 'USA')
              .count())
# Returns: 37

Note: This uses Firestore's native aggregation API, which is more efficient than fetching documents. However, it still counts as one document read per 1000 documents in the collection.

Source code in src/fire_prox/async_fire_query.py
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
async def count(self) -> int:
    """
    Count the documents matched by this query.

    Runs a server-side COUNT aggregation, so only the aggregate value
    travels over the wire -- no document payloads are fetched. All filters
    currently applied to the query are honored.

    Returns:
        The number of matching documents as an integer; 0 when nothing
        matches.

    Example:
        # Count all users
        total_users = await users.count()

        # Count with filters
        active_users = await users.where('active', '==', True).count()

        # Count with a chained, multi-clause query
        count = await (users
                      .where('age', '>', 25)
                      .where('country', '==', 'USA')
                      .count())

    Note:
        Backed by Firestore's native aggregation API. Cheaper than fetching
        the documents, though billing still charges one document read per
        1000 documents counted.
    """
    # Build the COUNT aggregation on top of the wrapped AsyncQuery.
    aggregation = self._query.count(alias='count')

    # One awaited round-trip yields the aggregation result rows.
    rows = await aggregation.get()
    if not rows:
        return 0

    # Exactly one alias was requested, so the first entry of the first
    # row carries the count; return it immediately.
    for row in rows:
        return row[0].value
    return 0

end_at(*document_fields_or_snapshot)

End query results at a cursor position (inclusive).

Creates a new AsyncFireQuery that ends at the specified cursor. The cursor document is included in the results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the end cursor applied.

Example: # Get all users up to and including age 50 query = users.order_by('age').end_at({'age': 50})

# Using a specific document as endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = await target_doc_ref.get()
query = users.order_by('age').end_at(target_snapshot)
Source code in src/fire_prox/async_fire_query.py
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
def end_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Bound the result set with an inclusive end cursor.

    Produces a new AsyncFireQuery whose results stop at the given cursor;
    the cursor document itself is part of the results.

    Args:
        *document_fields_or_snapshot: One of:
            - A dict of field values, e.g. {'field': value}
            - A DocumentSnapshot
            - Bare field values in the same order as the order_by clauses

    Returns:
        A new AsyncFireQuery carrying the end cursor.

    Example:
        # All users up to and including age 50
        query = users.order_by('age').end_at({'age': 50})

        # Bound by a specific document
        target_doc_ref = users.doc('user123')._doc_ref
        target_snapshot = await target_doc_ref.get()
        query = users.order_by('age').end_at(target_snapshot)
    """
    bounded = self._query.end_at(*document_fields_or_snapshot)
    return AsyncFireQuery(bounded, self._parent_collection, self._projection)

end_before(*document_fields_or_snapshot)

End query results before a cursor position (exclusive).

Creates a new AsyncFireQuery that ends before the specified cursor. The cursor document itself is excluded from results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the end-before cursor applied.

Example: # Get all users before age 50 (exclude 50) query = users.order_by('age').end_before({'age': 50})

# Using a specific document as exclusive endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = await target_doc_ref.get()
query = users.order_by('age').end_before(target_snapshot)
Source code in src/fire_prox/async_fire_query.py
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
def end_before(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Bound the result set with an exclusive end cursor.

    Produces a new AsyncFireQuery whose results stop just before the given
    cursor; the cursor document itself is left out of the results.

    Args:
        *document_fields_or_snapshot: One of:
            - A dict of field values, e.g. {'field': value}
            - A DocumentSnapshot
            - Bare field values in the same order as the order_by clauses

    Returns:
        A new AsyncFireQuery carrying the end-before cursor.

    Example:
        # All users strictly younger than 50
        query = users.order_by('age').end_before({'age': 50})

        # Exclusive bound at a specific document
        target_doc_ref = users.doc('user123')._doc_ref
        target_snapshot = await target_doc_ref.get()
        query = users.order_by('age').end_before(target_snapshot)
    """
    bounded = self._query.end_before(*document_fields_or_snapshot)
    return AsyncFireQuery(bounded, self._parent_collection, self._projection)

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search on top of the current query filters. This allows you to combine pre-filtering with vector search (requires a composite index).

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: A new AsyncFireQuery instance with the vector search applied.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

# Find nearest neighbors with pre-filtering
query = (collection
         .where('category', '==', 'tech')
         .find_nearest(
             vector_field="embedding",
             query_vector=Vector([0.1, 0.2, 0.3]),
             distance_measure=DistanceMeasure.COSINE,
             limit=5
         ))
async for doc in query.stream():
    print(f"{doc.title}: {doc.category}")

Note: - Requires a composite index when combining with where() clauses - Maximum limit is 1000 documents - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/async_fire_query.py
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'AsyncFireQuery':
    """
    Run a vector-similarity (nearest-neighbor) search over this query.

    Layers a vector search on top of any filters already on the query,
    enabling pre-filtered similarity lookups (a composite index is
    required when filters are combined with the search).

    Args:
        vector_field: Field holding the document's vector embedding.
        query_vector: Vector to match against
            (google.cloud.firestore_v1.vector.Vector).
        distance_measure: How distance is computed
            (DistanceMeasure.EUCLIDEAN, COSINE, or DOT_PRODUCT).
        limit: Maximum neighbors to return (capped at 1000).
        distance_result_field: Optional result field that receives the
            computed distance for each hit.

    Returns:
        A new AsyncFireQuery wrapping the vector search.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        query = (collection
                 .where('category', '==', 'tech')
                 .find_nearest(
                     vector_field="embedding",
                     query_vector=Vector([0.1, 0.2, 0.3]),
                     distance_measure=DistanceMeasure.COSINE,
                     limit=5
                 ))
        async for doc in query.stream():
            print(f"{doc.title}: {doc.category}")

    Note:
        - Combining with where() clauses needs a composite index
        - The limit may not exceed 1000 documents
        - Not supported by the Firestore emulator (production only)
    """
    # Hand every argument through to the native vector-search builder;
    # keyword form keeps this robust against upstream signature reorders.
    vector_query = self._query.find_nearest(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return AsyncFireQuery(vector_query, self._parent_collection, self._projection)

get() async

Execute the query and return results as a list.

Fetches all matching documents asynchronously and hydrates them into AsyncFireObject instances in LOADED state. If a projection is active (via .select()), returns vanilla dictionaries instead of AsyncFireObject instances.

Returns: - If no projection: List of AsyncFireObject instances for all documents matching the query. - If projection active: List of dictionaries containing only the selected fields. DocumentReferences are converted to AsyncFireObjects. - Empty list if no documents match.

Example: # Get all results as AsyncFireObjects users = await query.get() for user in users: print(f"{user.name}: {user.birth_year}")

# Get projected results as dictionaries
users = await query.select('name', 'email').get()
for user_dict in users:
    print(f"{user_dict['name']}: {user_dict['email']}")

# Check if results exist
results = await query.get()
if results:
    print(f"Found {len(results)} users")
else:
    print("No users found")
Source code in src/fire_prox/async_fire_query.py
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
async def get(self) -> 'Union[List[AsyncFireObject], List[Dict[str, Any]]]':
    """
    Run the query and collect every result into a list.

    Without a projection, each matching document is hydrated into an
    AsyncFireObject in LOADED state. With a projection active (set via
    .select()), plain dictionaries are returned instead, with any
    DocumentReference values converted to AsyncFireObjects.

    Returns:
        - No projection: list of AsyncFireObject instances, one per match.
        - Projection active: list of dicts holding only the selected
          fields (DocumentReferences become AsyncFireObjects).
        - An empty list when nothing matches.

    Example:
        # Full objects
        users = await query.get()
        for user in users:
            print(f"{user.name}: {user.birth_year}")

        # Projected dictionaries
        users = await query.select('name', 'email').get()
        for user_dict in users:
            print(f"{user_dict['name']}: {user_dict['email']}")

        # Existence check
        results = await query.get()
        if results:
            print(f"Found {len(results)} users")
        else:
            print("No users found")
    """
    if self._projection:
        # Projection branch: collect raw dicts, hydrating any embedded
        # DocumentReferences into AsyncFireObject proxies on the way.
        projected = []
        async for doc_snap in self._query.stream():
            projected.append(self._convert_projection_data(doc_snap.to_dict()))
        return projected

    # Default branch: wrap each snapshot in a LOADED AsyncFireObject.
    return [
        AsyncFireObject.from_snapshot(doc_snap, self._parent_collection)
        async for doc_snap in self._query.stream()
    ]

limit(count)

Limit the number of results returned.

Creates a new AsyncFireQuery that will return at most count results.

Args: count: Maximum number of documents to return. Must be positive.

Returns: A new AsyncFireQuery instance with the limit applied.

Raises: ValueError: If count is not positive.

Example:
    # Get top 10 results
    query = users.order_by('score', direction='DESCENDING').limit(10)

# Get first 5 matching documents
query = users.where('active', '==', True).limit(5)
Source code in src/fire_prox/async_fire_query.py
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
def limit(self, count: int) -> 'AsyncFireQuery':
    """
    Cap how many documents the query may return.

    Builds a new AsyncFireQuery that yields no more than `count` results.

    Args:
        count: Upper bound on returned documents; must be positive.

    Returns:
        A new AsyncFireQuery with the limit in place.

    Raises:
        ValueError: If count is zero or negative.

    Example:
        # Top 10 by score
        query = users.order_by('score', direction='DESCENDING').limit(10)

        # First 5 active users
        query = users.where('active', '==', True).limit(5)
    """
    # Reject non-positive limits up front rather than letting the SDK
    # produce a less obvious failure later.
    if count <= 0:
        raise ValueError(f"Limit count must be positive, got {count}")

    return AsyncFireQuery(
        self._query.limit(count), self._parent_collection, self._projection
    )

on_snapshot(callback)

Listen for real-time updates to this query.

This method sets up a real-time listener that fires the callback whenever any document matching the query changes. The listener runs on a separate thread managed by the Firestore SDK.

Important: This is a sync-only feature. Even for AsyncFireQuery, the listener uses a synchronous query (via the parent collection's _sync_client) to run on a background thread. This is the standard Firestore pattern for real-time listeners in Python.

Args: callback: Callback function invoked on query changes. Signature: callback(query_snapshot, changes, read_time) - query_snapshot: List of DocumentSnapshot objects matching the query - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED) - read_time: Timestamp of the snapshot

Returns: Watch object with an .unsubscribe() method to stop listening.

Example: import threading

callback_done = threading.Event()

def on_change(query_snapshot, changes, read_time):
    for change in changes:
        if change.type.name == 'ADDED':
            print(f"New: {change.document.id}")
        elif change.type.name == 'MODIFIED':
            print(f"Modified: {change.document.id}")
        elif change.type.name == 'REMOVED':
            print(f"Removed: {change.document.id}")
    callback_done.set()

# Listen to active users only (async query)
active_users = users.where('status', '==', 'active')
watch = active_users.on_snapshot(on_change)

# Wait for initial snapshot
callback_done.wait()

# Later: stop listening
watch.unsubscribe()

Note: The callback runs on a separate thread. Use threading primitives (Event, Lock, Queue) for synchronization with your main thread.

Source code in src/fire_prox/async_fire_query.py
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
def on_snapshot(self, callback: Any) -> Any:
    """
    Register a real-time listener for this query.

    The Firestore SDK invokes `callback` on a background thread it
    manages, once for the initial snapshot and again whenever a matching
    document changes.

    NOTE(review): earlier docs for this method state the listener is
    backed by a *synchronous* query obtained via the parent collection's
    _sync_client, but this wrapper delegates straight to self._query --
    confirm which query type actually services the watch.

    Args:
        callback: Invoked as callback(query_snapshot, changes, read_time):
            - query_snapshot: list of DocumentSnapshot objects matching
              the query
            - changes: list of DocumentChange objects
              (ADDED, MODIFIED, REMOVED)
            - read_time: timestamp of the snapshot

    Returns:
        A Watch object; call its `.unsubscribe()` to stop listening.

    Example:
        import threading

        callback_done = threading.Event()

        def on_change(query_snapshot, changes, read_time):
            for change in changes:
                if change.type.name == 'ADDED':
                    print(f"New: {change.document.id}")
                elif change.type.name == 'MODIFIED':
                    print(f"Modified: {change.document.id}")
                elif change.type.name == 'REMOVED':
                    print(f"Removed: {change.document.id}")
            callback_done.set()

        active_users = users.where('status', '==', 'active')
        watch = active_users.on_snapshot(on_change)

        callback_done.wait()
        watch.unsubscribe()

    Note:
        The callback runs off the main thread; coordinate with threading
        primitives (Event, Lock, Queue) as needed.
    """
    # Delegate listener registration to the wrapped query; the SDK owns
    # the background thread that drives the callback.
    watch = self._query.on_snapshot(callback)
    return watch

order_by(field, direction='ASCENDING')

Add an ordering clause to the query.

Creates a new AsyncFireQuery with ordering by the specified field.

Args: field: The field path to order by. direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.

Returns: A new AsyncFireQuery instance with the ordering applied.

Example: # Ascending order query = users.order_by('birth_year')

# Descending order
query = users.order_by('birth_year', direction='DESCENDING')

# Multiple orderings (chained)
query = (users
         .order_by('country')
         .order_by('birth_year', direction='DESCENDING'))
Source code in src/fire_prox/async_fire_query.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
def order_by(self, field: str, direction: str = 'ASCENDING') -> 'AsyncFireQuery':
    """
    Add an ordering clause to the query.

    Creates a new AsyncFireQuery with ordering by the specified field.

    Args:
        field: The field path to order by.
        direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'
            (case-insensitive). Default is 'ASCENDING'.

    Returns:
        A new AsyncFireQuery instance with the ordering applied.

    Raises:
        ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.

    Example:
        # Ascending order
        query = users.order_by('birth_year')

        # Descending order
        query = users.order_by('birth_year', direction='DESCENDING')

        # Multiple orderings (chained)
        query = (users
                 .order_by('country')
                 .order_by('birth_year', direction='DESCENDING'))
    """
    # Validate first so bad input fails fast, before any SDK import.
    normalized = direction.upper()
    if normalized not in ('ASCENDING', 'DESCENDING'):
        raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

    # Single lazy import replaces the duplicated per-branch imports of
    # the original; still deferred to keep module import light.
    from google.cloud.firestore_v1 import Query as QueryClass
    direction_const = (
        QueryClass.ASCENDING if normalized == 'ASCENDING' else QueryClass.DESCENDING
    )

    new_query = self._query.order_by(field, direction=direction_const)
    return AsyncFireQuery(new_query, self._parent_collection, self._projection)

select(*field_paths)

Select specific fields to return (projection).

Creates a new AsyncFireQuery that only returns the specified fields in the query results. When using projections, query results will be returned as vanilla dictionaries instead of AsyncFireObject instances. Any DocumentReferences in the returned dictionaries will be automatically converted to AsyncFireObject instances in ATTACHED state.

Args: *field_paths: One or more field paths to select. Field paths can include nested fields using dot notation (e.g., 'address.city').

Returns: A new AsyncFireQuery instance with the projection applied.

Raises: ValueError: If no field paths are provided.

Example: # Select a single field query = users.select('name') results = await query.get() # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

# Select multiple fields
query = users.select('name', 'email', 'birth_year')
results = await query.get()
# Returns: [{'name': 'Alice', 'email': 'alice@example.com', 'birth_year': 1990}, ...]

# Select with filtering and ordering
query = (users
         .where('birth_year', '>', 1990)
         .select('name', 'birth_year')
         .order_by('birth_year')
         .limit(10))

# DocumentReferences are auto-converted to AsyncFireObjects
query = posts.select('title', 'author')  # author is a DocumentReference
results = await query.get()
# results[0]['author'] is an AsyncFireObject, not a DocumentReference
await results[0]['author'].fetch()
print(results[0]['author'].name)

Note: - Projection queries return dictionaries, not AsyncFireObject instances - Only the selected fields will be present in the returned dictionaries - DocumentReferences are automatically hydrated to AsyncFireObject instances - Projected results are more bandwidth-efficient for large documents

Source code in src/fire_prox/async_fire_query.py
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
def select(self, *field_paths: str) -> 'AsyncFireQuery':
    """
    Project the query onto a subset of fields.

    Builds a new AsyncFireQuery that fetches only the named fields.
    Projected queries hand back plain dictionaries rather than
    AsyncFireObject instances; any DocumentReference values inside those
    dictionaries are converted to AsyncFireObjects in ATTACHED state.

    Args:
        *field_paths: One or more field paths. Dot notation reaches
            nested fields (e.g. 'address.city').

    Returns:
        A new AsyncFireQuery with the projection applied.

    Raises:
        ValueError: If called with no field paths.

    Example:
        # Single field
        query = users.select('name')
        results = await query.get()
        # -> [{'name': 'Alice'}, {'name': 'Bob'}, ...]

        # Several fields
        query = users.select('name', 'email', 'birth_year')
        results = await query.get()
        # -> [{'name': 'Alice', 'email': 'alice@example.com', 'birth_year': 1990}, ...]

        # Combined with filtering/ordering
        query = (users
                 .where('birth_year', '>', 1990)
                 .select('name', 'birth_year')
                 .order_by('birth_year')
                 .limit(10))

        # DocumentReferences become AsyncFireObjects
        query = posts.select('title', 'author')  # author is a DocumentReference
        results = await query.get()
        await results[0]['author'].fetch()
        print(results[0]['author'].name)

    Note:
        - Projection results are dicts, not AsyncFireObject instances
        - Only the selected fields appear in the result dicts
        - DocumentReferences are auto-hydrated to AsyncFireObjects
        - Projections save bandwidth on large documents
    """
    if not field_paths:
        raise ValueError("select() requires at least one field path")

    # Native select wants a list; remember the projection so result
    # hydration knows to emit dictionaries.
    projected_query = self._query.select(list(field_paths))
    return AsyncFireQuery(
        projected_query, self._parent_collection, projection=field_paths
    )

start_after(*document_fields_or_snapshot)

Start query results after a cursor position (exclusive).

Creates a new AsyncFireQuery that starts after the specified cursor. The cursor document itself is excluded from results. This is typically used for pagination to avoid duplicating the last document from the previous page.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the start-after cursor applied.

Example: # Pagination: exclude the last document from previous page page1 = await users.order_by('age').limit(10).get() last_age = page1[-1].age page2 = await users.order_by('age').start_after({'age': last_age}).limit(10).get()

# Using a document snapshot (common pattern)
last_doc_ref = page1[-1]._doc_ref
last_snapshot = await last_doc_ref.get()
page2 = await users.order_by('age').start_after(last_snapshot).limit(10).get()
Source code in src/fire_prox/async_fire_query.py
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
def start_after(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Begin the result set just after a cursor (exclusive).

    Builds a new AsyncFireQuery whose results begin immediately after the
    given cursor, excluding the cursor document itself. This is the usual
    pagination primitive: it prevents the last row of one page from
    reappearing at the top of the next.

    Args:
        *document_fields_or_snapshot: One of:
            - A dict of field values, e.g. {'field': value}
            - A DocumentSnapshot from an earlier query
            - Bare field values in the same order as the order_by clauses

    Returns:
        A new AsyncFireQuery carrying the start-after cursor.

    Example:
        # Pagination without duplicating the previous page's last row
        page1 = await users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = await users.order_by('age').start_after({'age': last_age}).limit(10).get()

        # Snapshot-based cursor (common pattern)
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = await last_doc_ref.get()
        page2 = await users.order_by('age').start_after(last_snapshot).limit(10).get()
    """
    positioned = self._query.start_after(*document_fields_or_snapshot)
    return AsyncFireQuery(positioned, self._parent_collection, self._projection)

start_at(*document_fields_or_snapshot)

Start query results at a cursor position (inclusive).

Creates a new AsyncFireQuery that starts at the specified cursor. The cursor can be a document snapshot or a dictionary of field values matching the order_by fields.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new AsyncFireQuery instance with the start cursor applied.

Example: # Using field values (requires matching order_by) query = users.order_by('age').start_at({'age': 25})

# Pagination: get first page, then start at last document
page1 = await users.order_by('age').limit(10).get()
last_age = page1[-1].age
page2 = await users.order_by('age').start_at({'age': last_age}).limit(10).get()

# Using a document snapshot
last_doc_ref = page1[-1]._doc_ref
last_snapshot = await last_doc_ref.get()
page2 = await users.order_by('age').start_at(last_snapshot).limit(10).get()
Source code in src/fire_prox/async_fire_query.py
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
def start_at(self, *document_fields_or_snapshot) -> 'AsyncFireQuery':
    """
    Begin the result set at a cursor (inclusive).

    Builds a new AsyncFireQuery whose results begin at the given cursor;
    the cursor document is included. The cursor may be a document snapshot
    or field values matching the order_by clauses.

    Args:
        *document_fields_or_snapshot: One of:
            - A dict of field values, e.g. {'field': value}
            - A DocumentSnapshot from an earlier query
            - Bare field values in the same order as the order_by clauses

    Returns:
        A new AsyncFireQuery carrying the start cursor.

    Example:
        # Field-value cursor (requires a matching order_by)
        query = users.order_by('age').start_at({'age': 25})

        # Pagination: resume at the last document of the prior page
        page1 = await users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = await users.order_by('age').start_at({'age': last_age}).limit(10).get()

        # Snapshot-based cursor
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = await last_doc_ref.get()
        page2 = await users.order_by('age').start_at(last_snapshot).limit(10).get()
    """
    positioned = self._query.start_at(*document_fields_or_snapshot)
    return AsyncFireQuery(positioned, self._parent_collection, self._projection)

stream() async

Execute the query and stream results as an async iterator.

Returns an async generator that yields AsyncFireObject instances one at a time. This is more memory-efficient than .get() for large result sets as it doesn't load all results into memory at once. If a projection is active (via .select()), yields vanilla dictionaries instead.

Yields: - If no projection: AsyncFireObject instances in LOADED state for each matching document. - If projection active: Dictionaries containing only the selected fields. DocumentReferences are converted to AsyncFireObjects.

Example: # Stream results one at a time as AsyncFireObjects async for user in query.stream(): print(f"{user.name}: {user.birth_year}") # Process each user without loading all users into memory

# Stream projected results as dictionaries
async for user_dict in query.select('name', 'email').stream():
    print(f"{user_dict['name']}: {user_dict['email']}")

# Works with any query
async for post in (posts
                  .where('published', '==', True)
                  .order_by('date', direction='DESCENDING')
                  .stream()):
    print(post.title)
Source code in src/fire_prox/async_fire_query.py
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
async def stream(self) -> Union[AsyncIterator[AsyncFireObject], AsyncIterator[Dict[str, Any]]]:
    """
    Execute the query, yielding results lazily as an async generator.

    Unlike .get(), which materializes every match in memory, this method
    produces one result per iteration, making it the preferred choice for
    large result sets. When a projection has been configured via .select(),
    plain dictionaries are yielded; otherwise each match is wrapped in an
    AsyncFireObject.

    Yields:
        - Without a projection: AsyncFireObject instances in LOADED state,
          one per matching document.
        - With a projection: dictionaries holding only the selected fields,
          with DocumentReference values converted to AsyncFireObjects.

    Example:
        # Stream results one at a time as AsyncFireObjects
        async for user in query.stream():
            print(f"{user.name}: {user.birth_year}")
            # Process each user without loading all users into memory

        # Stream projected results as dictionaries
        async for user_dict in query.select('name', 'email').stream():
            print(f"{user_dict['name']}: {user_dict['email']}")

        # Works with any query
        async for post in (posts
                          .where('published', '==', True)
                          .order_by('date', direction='DESCENDING')
                          .stream()):
            print(post.title)
    """
    # The projection setting cannot change mid-iteration, so branching per
    # snapshot is equivalent to selecting a dedicated loop up front.
    async for snapshot in self._query.stream():
        if self._projection:
            # Projected query: emit a vanilla dict with any
            # DocumentReference values converted to AsyncFireObjects.
            yield self._convert_projection_data(snapshot.to_dict())
        else:
            yield AsyncFireObject.from_snapshot(snapshot, self._parent_collection)

sum(field) async

Sum a numeric field across all matching documents.

Executes an aggregation query to sum the values of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to sum.

Returns: Sum of the field values across all matching documents. Returns 0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Sum all salaries total_salary = await employees.sum('salary') # Returns: 5000000

# Sum with filters
engineering_salary = await (employees
                           .where('department', '==', 'Engineering')
                           .sum('salary'))
# Returns: 2500000

# Sum revenue from active products
total_revenue = await (products
                      .where('active', '==', True)
                      .sum('revenue'))
# Returns: 1250000.50

Note: - Null values are ignored in the sum - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/async_fire_query.py
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
async def sum(self, field: str) -> Union[int, float]:
    """
    Sum a numeric field across all matching documents.

    Executes an aggregation query to sum the values of a specific field
    without fetching the actual documents. The field must contain numeric
    values (int or float).

    Args:
        field: Name of the numeric field to sum.

    Returns:
        Sum of the field values across all matching documents.
        Returns 0 if no documents match or if all values are null.

    Raises:
        ValueError: If field is None or empty.

    Example:
        # Sum all salaries
        total_salary = await employees.sum('salary')
        # Returns: 5000000

        # Sum with filters
        engineering_salary = await (employees
                                   .where('department', '==', 'Engineering')
                                   .sum('salary'))
        # Returns: 2500000

    Note:
        - Null values are ignored in the sum
        - Non-numeric values will cause an error
        - This is more efficient than fetching all documents
    """
    if not field:
        raise ValueError("sum() requires a field name")

    # Build a server-side aggregation query; no documents are downloaded.
    agg_query = self._query.sum(field, alias='sum')

    # get() returns a list of aggregation-result rows. With a single
    # aggregation there is exactly one row with one entry, so index
    # directly instead of looping just to return the first element.
    result = await agg_query.get()
    if result:
        return result[0][0].value
    return 0

where(field, op, value)

Add a filter condition to the query.

Creates a new AsyncFireQuery with an additional filter condition. Uses the immutable pattern - returns a new instance rather than modifying the current query.

Args: field: The field path to filter on (e.g., 'name', 'address.city'). op: Comparison operator. Supported operators: '==' (equal), '!=' (not equal), '<' (less than), '<=' (less than or equal), '>' (greater than), '>=' (greater than or equal), 'in' (value in list), 'not-in' (value not in list), 'array-contains' (array contains value), 'array-contains-any' (array contains any of the values). value: The value to compare against.

Returns: A new AsyncFireQuery instance with the added filter.

Example: # Single condition query = users.where('birth_year', '>', 1800)

# Multiple conditions (chained)
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England'))
Source code in src/fire_prox/async_fire_query.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
def where(self, field: str, op: str, value: Any) -> 'AsyncFireQuery':
    """
    Return a new query carrying an additional filter condition.

    The query object follows the immutable pattern: rather than mutating
    this instance, a fresh AsyncFireQuery with the extra condition is
    returned, which lets filters be chained fluently.

    Args:
        field: Field path to filter on (e.g., 'name', 'address.city').
        op: Comparison operator. Supported operators:
            '==' (equal), '!=' (not equal),
            '<' (less than), '<=' (less than or equal),
            '>' (greater than), '>=' (greater than or equal),
            'in' (value in list), 'not-in' (value not in list),
            'array-contains' (array contains value),
            'array-contains-any' (array contains any of the values).
        value: The value to compare against.

    Returns:
        A new AsyncFireQuery instance with the added filter.

    Example:
        # Single condition
        query = users.where('birth_year', '>', 1800)

        # Multiple conditions (chained)
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England'))
    """
    # Express the condition as a FieldFilter (the non-deprecated API),
    # then derive a filtered native query from the current one.
    condition = FieldFilter(field, op, value)
    filtered = self._query.where(filter=condition)
    return AsyncFireQuery(filtered, self._parent_collection, self._projection)

async_fireprox

AsyncFireProx: Main entry point for async FireProx usage.

This module provides the AsyncFireProx class, which serves as the primary interface for users to interact with Firestore asynchronously through the FireProx API.

AsyncFireProx

Bases: BaseFireProx

Main entry point for the async FireProx library.

AsyncFireProx wraps the native google-cloud-firestore AsyncClient and provides a simplified, Pythonic interface for working with Firestore asynchronously.

Usage Examples: # Initialize with a pre-configured native async client from google.cloud import firestore from fire_prox import AsyncFireProx

native_client = firestore.AsyncClient(project='my-project')
db = AsyncFireProx(native_client)

# Access a document (ATTACHED state)
user = db.doc('users/alovelace')
await user.fetch()
print(user.name)

# Create a new document
users = db.collection('users')
new_user = users.new()
new_user.name = 'Charles Babbage'
new_user.year = 1791
await new_user.save()

# Update a document
user = db.doc('users/alovelace')
await user.fetch()
user.year = 1816
await user.save()

# Delete a document
await user.delete()
Source code in src/fire_prox/async_fireprox.py
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
class AsyncFireProx(BaseFireProx):
    """
    Main entry point for the async FireProx library.

    AsyncFireProx wraps the native google-cloud-firestore AsyncClient and provides
    a simplified, Pythonic interface for working with Firestore asynchronously.

    Usage Examples:
        # Initialize with a pre-configured native async client
        from google.cloud import firestore
        from fire_prox import AsyncFireProx

        native_client = firestore.AsyncClient(project='my-project')
        db = AsyncFireProx(native_client)

        # Access a document (ATTACHED state)
        user = db.doc('users/alovelace')
        await user.fetch()
        print(user.name)

        # Create a new document
        users = db.collection('users')
        new_user = users.new()
        new_user.name = 'Charles Babbage'
        new_user.year = 1791
        await new_user.save()

        # Update a document
        user = db.doc('users/alovelace')
        await user.fetch()
        user.year = 1816
        await user.save()

        # Delete a document
        await user.delete()
    """

    def __init__(self, client: AsyncFirestoreClient):
        """
        Initialize AsyncFireProx with a native async Firestore client.

        Args:
            client: A configured google.cloud.firestore.AsyncClient instance.
                   Authentication and project configuration should be handled
                   before creating this instance.

        Raises:
            TypeError: If client is not a google.cloud.firestore.AsyncClient.

        Example:
            from google.cloud import firestore
            from fire_prox import AsyncFireProx

            # Option 1: Default credentials
            native_client = firestore.AsyncClient()

            # Option 2: Explicit project
            native_client = firestore.AsyncClient(project='my-project-id')

            # Initialize AsyncFireProx
            db = AsyncFireProx(native_client)
        """
        if not isinstance(client, AsyncFirestoreClient):
            raise TypeError(
                f"client must be a google.cloud.firestore.AsyncClient, "
                f"got {type(client)}"
            )

        super().__init__(client)

        # Create companion sync client for lazy loading.
        # Both clients point to the same Firestore backend.
        # Imported locally to keep the sync client an implementation detail.
        from google.cloud import firestore
        self._sync_client = firestore.Client(
            project=client.project,
            # NOTE(review): _database is a private attribute of the native
            # async client — confirm it remains available on SDK upgrades.
            database=client._database
        )

    # =========================================================================
    # Document Access
    # =========================================================================

    def doc(self, path: str) -> AsyncFireObject:
        """
        Get a reference to a document by its full path.

        Creates an AsyncFireObject in ATTACHED state. No data is fetched from
        Firestore until fetch() is called or an attribute is accessed (lazy loading).

        Args:
            path: The full document path, e.g., 'users/alovelace' or
                 'users/uid/posts/post123'. Must be a valid Firestore
                 document path with an even number of segments.

        Returns:
            An AsyncFireObject instance in ATTACHED state.

        Raises:
            ValueError: If path has an odd number of segments.

        Example:
            # Root-level document with lazy loading
            user = db.doc('users/alovelace')
            print(user.name)  # Triggers automatic fetch

            # Or explicit fetch
            user = db.doc('users/alovelace')
            await user.fetch()
            print(user.name)

            # Nested document (subcollection)
            post = db.doc('users/alovelace/posts/post123')
            await post.fetch()
        """
        return self._create_document_proxy(path, AsyncFireObject)

    def document(self, path: str) -> AsyncFireObject:
        """
        Alias for doc(). Get a reference to a document by its full path.

        Args:
            path: The full document path.

        Returns:
            An AsyncFireObject instance in ATTACHED state.
        """
        return self.doc(path)

    # =========================================================================
    # Collection Access
    # =========================================================================

    def collection(self, path: str) -> AsyncFireCollection:
        """
        Get a reference to a collection by its path.

        Creates an AsyncFireCollection wrapper around the native
        AsyncCollectionReference.

        Args:
            path: The collection path, e.g., 'users' or 'users/uid/posts'.
                 Must have an odd number of segments.

        Returns:
            An AsyncFireCollection instance.

        Raises:
            ValueError: If path has an even number of segments.

        Example:
            # Root-level collection
            users = db.collection('users')
            new_user = users.new()
            new_user.name = 'Ada'
            await new_user.save()

            # Subcollection
            posts = db.collection('users/alovelace/posts')
            new_post = posts.new()
            new_post.title = 'Analysis Engine'
            await new_post.save()
        """
        return self._create_collection_proxy(path, AsyncFireCollection)

    async def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
        """
        List subcollections beneath the specified document path asynchronously.

        Args:
            path: Document path whose subcollections should be listed.
            names_only: Return collection IDs instead of AsyncFireCollection wrappers.

        Returns:
            List of subcollection names or AsyncFireCollection wrappers.
        """
        document = self.doc(path)
        return await document.collections(names_only=names_only)

    def _get_document_kwargs(self, path: str) -> Dict[str, Any]:
        """Extra kwargs for document proxies: companion sync ref and client."""
        sync_doc_ref = self._sync_client.document(path)
        return {'sync_doc_ref': sync_doc_ref, 'sync_client': self._sync_client}

    def _get_collection_kwargs(self, path: str) -> Dict[str, Any]:
        """Extra kwargs for collection proxies: companion sync client."""
        return {'sync_client': self._sync_client}

__init__(client)

Initialize AsyncFireProx with a native async Firestore client.

Args: client: A configured google.cloud.firestore.AsyncClient instance. Authentication and project configuration should be handled before creating this instance.

Raises: TypeError: If client is not a google.cloud.firestore.AsyncClient.

Example: from google.cloud import firestore from fire_prox import AsyncFireProx

# Option 1: Default credentials
native_client = firestore.AsyncClient()

# Option 2: Explicit project
native_client = firestore.AsyncClient(project='my-project-id')

# Initialize AsyncFireProx
db = AsyncFireProx(native_client)
Source code in src/fire_prox/async_fireprox.py
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
def __init__(self, client: AsyncFirestoreClient):
    """
    Initialize AsyncFireProx with a native async Firestore client.

    Args:
        client: A configured google.cloud.firestore.AsyncClient instance.
               Authentication and project configuration should be handled
               before creating this instance.

    Raises:
        TypeError: If client is not a google.cloud.firestore.AsyncClient.

    Example:
        from google.cloud import firestore
        from fire_prox import AsyncFireProx

        # Option 1: Default credentials
        native_client = firestore.AsyncClient()

        # Option 2: Explicit project
        native_client = firestore.AsyncClient(project='my-project-id')

        # Initialize AsyncFireProx
        db = AsyncFireProx(native_client)
    """
    if not isinstance(client, AsyncFirestoreClient):
        raise TypeError(
            f"client must be a google.cloud.firestore.AsyncClient, "
            f"got {type(client)}"
        )

    super().__init__(client)

    # Create companion sync client for lazy loading.
    # Both clients point to the same Firestore backend.
    # Imported locally to keep the sync client an implementation detail.
    from google.cloud import firestore
    self._sync_client = firestore.Client(
        project=client.project,
        # NOTE(review): _database is a private attribute of the native
        # async client — confirm it remains available on SDK upgrades.
        database=client._database
    )

collection(path)

Get a reference to a collection by its path.

Creates an AsyncFireCollection wrapper around the native AsyncCollectionReference.

Args: path: The collection path, e.g., 'users' or 'users/uid/posts'. Must have an odd number of segments.

Returns: An AsyncFireCollection instance.

Raises: ValueError: If path has an even number of segments.

Example: # Root-level collection users = db.collection('users') new_user = users.new() new_user.name = 'Ada' await new_user.save()

# Subcollection
posts = db.collection('users/alovelace/posts')
new_post = posts.new()
new_post.title = 'Analysis Engine'
await new_post.save()
Source code in src/fire_prox/async_fireprox.py
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
def collection(self, path: str) -> AsyncFireCollection:
    """
    Return a wrapper for the collection at the given path.

    The returned AsyncFireCollection wraps the native
    AsyncCollectionReference for the path.

    Args:
        path: Collection path such as 'users' or 'users/uid/posts'.
             A valid collection path has an odd number of segments.

    Returns:
        An AsyncFireCollection instance.

    Raises:
        ValueError: If path has an even number of segments.

    Example:
        # Root-level collection
        users = db.collection('users')
        new_user = users.new()
        new_user.name = 'Ada'
        await new_user.save()

        # Subcollection
        posts = db.collection('users/alovelace/posts')
        new_post = posts.new()
        new_post.title = 'Analysis Engine'
        await new_post.save()
    """
    proxy = self._create_collection_proxy(path, AsyncFireCollection)
    return proxy

collections(path, *, names_only=False) async

List subcollections beneath the specified document path asynchronously.

Args: path: Document path whose subcollections should be listed. names_only: Return collection IDs instead of AsyncFireCollection wrappers.

Returns: List of subcollection names or AsyncFireCollection wrappers.

Source code in src/fire_prox/async_fireprox.py
181
182
183
184
185
186
187
188
189
190
191
192
193
async def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
    """
    Asynchronously list the subcollections under a document path.

    Args:
        path: Document path whose subcollections should be listed.
        names_only: When True, return plain collection IDs instead of
            AsyncFireCollection wrappers.

    Returns:
        A list of subcollection names or AsyncFireCollection wrappers.
    """
    # Delegate to the document proxy, which knows how to enumerate
    # its own subcollections.
    return await self.doc(path).collections(names_only=names_only)

doc(path)

Get a reference to a document by its full path.

Creates an AsyncFireObject in ATTACHED state. No data is fetched from Firestore until fetch() is called or an attribute is accessed (lazy loading).

Args: path: The full document path, e.g., 'users/alovelace' or 'users/uid/posts/post123'. Must be a valid Firestore document path with an even number of segments.

Returns: An AsyncFireObject instance in ATTACHED state.

Raises: ValueError: If path has an odd number of segments.

Example: # Root-level document with lazy loading user = db.doc('users/alovelace') print(user.name) # Triggers automatic fetch

# Or explicit fetch
user = db.doc('users/alovelace')
await user.fetch()
print(user.name)

# Nested document (subcollection)
post = db.doc('users/alovelace/posts/post123')
await post.fetch()
Source code in src/fire_prox/async_fireprox.py
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
def doc(self, path: str) -> AsyncFireObject:
    """
    Return a proxy for the document at the given full path.

    The AsyncFireObject is created in ATTACHED state: nothing is read
    from Firestore until fetch() is called or an attribute access
    triggers lazy loading.

    Args:
        path: Full document path, e.g., 'users/alovelace' or
             'users/uid/posts/post123'. A valid document path has an
             even number of segments.

    Returns:
        An AsyncFireObject instance in ATTACHED state.

    Raises:
        ValueError: If path has an odd number of segments.

    Example:
        # Root-level document with lazy loading
        user = db.doc('users/alovelace')
        print(user.name)  # Triggers automatic fetch

        # Or explicit fetch
        user = db.doc('users/alovelace')
        await user.fetch()
        print(user.name)

        # Nested document (subcollection)
        post = db.doc('users/alovelace/posts/post123')
        await post.fetch()
    """
    proxy = self._create_document_proxy(path, AsyncFireObject)
    return proxy

document(path)

Alias for doc(). Get a reference to a document by its full path.

Args: path: The full document path.

Returns: An AsyncFireObject instance in ATTACHED state.

Source code in src/fire_prox/async_fireprox.py
133
134
135
136
137
138
139
140
141
142
143
def document(self, path: str) -> AsyncFireObject:
    """
    Alias for doc(): return a proxy for the document at the given path.

    Args:
        path: The full document path.

    Returns:
        An AsyncFireObject instance in ATTACHED state.
    """
    # Delegate so both spellings stay in lockstep.
    return self.doc(path)

base_fire_collection

BaseFireCollection: Shared logic for sync and async FireCollection implementations.

This module contains the base class that implements all logic that is identical between synchronous and asynchronous FireCollection implementations.

BaseFireCollection

Base class for FireCollection implementations (sync and async).

Contains all shared logic: - Initialization - Properties (id, path) - String representations

Subclasses must implement: - _instantiate_object() - creates FireObject/AsyncFireObject - Query methods (Phase 2)

Source code in src/fire_prox/base_fire_collection.py
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
class BaseFireCollection:
    """
    Base class for FireCollection implementations (sync and async).

    Contains all shared logic:
    - Initialization
    - Properties (id, path)
    - String representations

    Subclasses must implement:
    - _instantiate_object() - creates FireObject/AsyncFireObject
    - Query methods (Phase 2)
    """

    def __init__(
        self,
        collection_ref: Any,  # CollectionReference or AsyncCollectionReference
        client: Optional[Any] = None,
        sync_client: Optional[Any] = None
    ):
        """
        Initialize a FireCollection.

        Args:
            collection_ref: The underlying CollectionReference from
                           google-cloud-firestore.
            client: Optional reference to the parent FireProx client.
            sync_client: Optional sync Firestore client for lazy loading (async only).
        """
        self._collection_ref = collection_ref
        self._client = client
        self._sync_client = sync_client

    # =========================================================================
    # Document Factories (SHARED)
    # =========================================================================

    def _instantiate_object(
        self,
        *,
        doc_ref: Any,
        initial_state: 'State',  # forward ref: avoids import-order dependence
        parent_collection: 'BaseFireCollection',
        **kwargs: Any,
    ) -> Any:
        """Create a collection-backed document instance.

        Subclasses must override this to construct the concrete
        FireObject/AsyncFireObject type.
        """
        raise NotImplementedError

    def _get_new_kwargs(self) -> Dict[str, Any]:
        """Return extra kwargs for instantiating DETACHED objects."""
        return {}

    def _get_doc_kwargs(self, doc_id: str) -> Dict[str, Any]:
        """Return extra kwargs for instantiating ATTACHED objects."""
        return {}

    # -------------------------------------------------------------------------
    # Shared validation helpers
    # -------------------------------------------------------------------------

    def _validate_batch_size(self, batch_size: int) -> None:
        """
        Validate that a batch size is a positive integer.

        Args:
            batch_size: Proposed batch size to validate.

        Raises:
            ValueError: If batch_size is not a positive integer.
        """
        if batch_size <= 0:
            raise ValueError(f"batch_size must be positive, got {batch_size}")

    def new(self) -> Any:
        """Create a new document proxy in DETACHED state."""
        return self._instantiate_object(
            doc_ref=None,
            initial_state=State.DETACHED,
            parent_collection=self,
            **self._get_new_kwargs(),
        )

    def doc(self, doc_id: str) -> Any:
        """Create a document proxy in ATTACHED state."""
        doc_ref = self._collection_ref.document(doc_id)
        return self._instantiate_object(
            doc_ref=doc_ref,
            initial_state=State.ATTACHED,
            parent_collection=self,
            **self._get_doc_kwargs(doc_id),
        )

    # =========================================================================
    # Transaction Support (SHARED)
    # =========================================================================

    def transaction(self) -> Any:
        """
        Create a transaction for atomic read-modify-write operations.

        Convenience method for creating transactions directly from a collection
        reference, eliminating the need to access the root FireProx client.

        Returns:
            A native google.cloud.firestore.Transaction or
            google.cloud.firestore.AsyncTransaction instance.

        Example:
            users = db.collection('users')
            transaction = users.transaction()

            @firestore.transactional
            def update_user(transaction, user_id):
                user = users.doc(user_id)
                user.fetch(transaction=transaction)
                user.visits += 1
                user.save(transaction=transaction)

            update_user(transaction, 'alice')
        """
        return self._client.transaction()

    def batch(self) -> Any:
        """
        Create a batch for accumulating multiple write operations.

        Convenience method for creating batches directly from a collection
        reference, eliminating the need to access the root FireProx client.

        Returns:
            A native google.cloud.firestore.WriteBatch or
            google.cloud.firestore.AsyncWriteBatch instance.

        Example:
            users = db.collection('users')
            batch = users.batch()

            # Accumulate operations
            user1 = users.doc('alice')
            user1.name = 'Alice'
            user1.save(batch=batch)

            user2 = users.doc('bob')
            user2.name = 'Bob'
            user2.save(batch=batch)

            # Commit all operations atomically
            batch.commit()

        Note:
            See BaseFireProx.batch() for detailed documentation on batch operations.
        """
        return self._client.batch()

    # =========================================================================
    # Properties (SHARED)
    # =========================================================================

    @property
    def id(self) -> str:
        """
        Get the collection ID (last segment of collection path).

        Returns:
            The collection ID string.
        """
        return self._collection_ref.id

    @property
    def path(self) -> str:
        """
        Get the full Firestore path of the collection.

        Returns:
            The full path string (e.g., 'users' or 'users/uid/posts').
        """
        # _path is a tuple, convert to slash-separated string
        return '/'.join(self._collection_ref._path)

    # =========================================================================
    # Special Methods (SHARED)
    # =========================================================================

    def __repr__(self) -> str:
        """
        Return a detailed string representation for debugging.

        Returns:
            String showing collection path.
        """
        return f"<{type(self).__name__} path='{self.path}'>"

    def __str__(self) -> str:
        """
        Return a human-readable string representation.

        Returns:
            String showing the collection path.
        """
        return f"{type(self).__name__}({self.path})"

    # =========================================================================
    # Real-Time Listeners (Sync-only)
    # =========================================================================

    def on_snapshot(self, callback: Any) -> Any:
        """
        Listen for real-time updates to this collection.

        This method sets up a real-time listener that fires the callback
        whenever any document in the collection changes. The listener runs
        on a separate thread managed by the Firestore SDK.

        **Important**: This is a sync-only feature. Even for AsyncFireCollection
        instances, the listener uses the synchronous client (via _sync_client)
        to run on a background thread. This is the standard Firestore pattern
        for real-time listeners in Python.

        Args:
            callback: Callback function invoked on collection changes.
                     Signature: callback(col_snapshot, changes, read_time)
                     - col_snapshot: List of DocumentSnapshot objects
                     - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED)
                     - read_time: Timestamp of the snapshot

        Returns:
            Watch object with an `.unsubscribe()` method to stop listening.

        Example:
            import threading

            callback_done = threading.Event()

            def on_change(col_snapshot, changes, read_time):
                for change in changes:
                    if change.type.name == 'ADDED':
                        print(f"New document: {change.document.id}")
                    elif change.type.name == 'MODIFIED':
                        print(f"Modified document: {change.document.id}")
                    elif change.type.name == 'REMOVED':
                        print(f"Removed document: {change.document.id}")
                callback_done.set()

            # Start listening to a collection
            users = db.collection('users')
            watch = users.on_snapshot(on_change)

            # Wait for initial snapshot
            callback_done.wait()

            # Later: stop listening
            watch.unsubscribe()

        Note:
            The callback runs on a separate thread. Use threading primitives
            (Event, Lock, Queue) for synchronization with your main thread.
        """
        # __init__ always assigns _sync_client, so a plain None check
        # suffices (the previous hasattr() guard was redundant).
        if self._sync_client is not None:
            # AsyncFireCollection: build a sync collection ref so the
            # listener can run on the SDK's background thread.
            collection_ref = self._sync_client.collection(self.path)
        else:
            # FireCollection: use the regular collection ref
            collection_ref = self._collection_ref

        # Set up the listener
        return collection_ref.on_snapshot(callback)

id property

Get the collection ID (last segment of collection path).

Returns: The collection ID string.

path property

Get the full Firestore path of the collection.

Returns: The full path string (e.g., 'users' or 'users/uid/posts').

__init__(collection_ref, client=None, sync_client=None)

Initialize a FireCollection.

Args: collection_ref: The underlying CollectionReference from google-cloud-firestore. client: Optional reference to the parent FireProx client. sync_client: Optional sync Firestore client for lazy loading (async only).

Source code in src/fire_prox/base_fire_collection.py
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
def __init__(
    self,
    collection_ref: Any,  # CollectionReference or AsyncCollectionReference
    client: Optional[Any] = None,
    sync_client: Optional[Any] = None
):
    """
    Set up a FireCollection wrapper.

    Args:
        collection_ref: Underlying (Async)CollectionReference from the
            google-cloud-firestore SDK.
        client: Parent FireProx client, if available.
        sync_client: Sync Firestore client used for lazy loading in the
            async implementation.
    """
    # Plain attribute assignments; order is not significant.
    self._sync_client = sync_client
    self._client = client
    self._collection_ref = collection_ref

__repr__()

Return a detailed string representation for debugging.

Returns: String showing collection path.

Source code in src/fire_prox/base_fire_collection.py
196
197
198
199
200
201
202
203
def __repr__(self) -> str:
    """Debug-oriented representation: concrete class name plus collection path."""
    cls_name = type(self).__name__
    return "<{} path='{}'>".format(cls_name, self.path)

__str__()

Return a human-readable string representation.

Returns: String showing the collection path.

Source code in src/fire_prox/base_fire_collection.py
205
206
207
208
209
210
211
212
def __str__(self) -> str:
    """Human-readable representation showing the collection path."""
    return "{}({})".format(type(self).__name__, self.path)

batch()

Create a batch for accumulating multiple write operations.

Convenience method for creating batches directly from a collection reference, eliminating the need to access the root FireProx client.

Returns: A native google.cloud.firestore.WriteBatch or google.cloud.firestore.AsyncWriteBatch instance.

Example: users = db.collection('users') batch = users.batch()

# Accumulate operations
user1 = users.doc('alice')
user1.name = 'Alice'
user1.save(batch=batch)

user2 = users.doc('bob')
user2.name = 'Bob'
user2.save(batch=batch)

# Commit all operations atomically
batch.commit()

Note: See BaseFireProx.batch() for detailed documentation on batch operations.

Source code in src/fire_prox/base_fire_collection.py
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
def batch(self) -> Any:
    """
    Create a write batch via the parent FireProx client.

    Convenience shortcut so callers holding only a collection reference do
    not have to reach for the root client themselves.

    Returns:
        A native google.cloud.firestore.WriteBatch or
        google.cloud.firestore.AsyncWriteBatch instance.

    Example:
        users = db.collection('users')
        batch = users.batch()

        user1 = users.doc('alice')
        user1.name = 'Alice'
        user1.save(batch=batch)

        user2 = users.doc('bob')
        user2.name = 'Bob'
        user2.save(batch=batch)

        batch.commit()  # all accumulated writes applied atomically

    Note:
        See BaseFireProx.batch() for the full discussion of batch semantics.
    """
    # Delegate straight to the parent client.
    return self._client.batch()

doc(doc_id)

Create a document proxy in ATTACHED state.

Source code in src/fire_prox/base_fire_collection.py
 95
 96
 97
 98
 99
100
101
102
103
def doc(self, doc_id: str) -> Any:
    """Return a document proxy bound to ``doc_id``, in ATTACHED state."""
    return self._instantiate_object(
        doc_ref=self._collection_ref.document(doc_id),
        initial_state=State.ATTACHED,
        parent_collection=self,
        **self._get_doc_kwargs(doc_id),
    )

new()

Create a new document proxy in DETACHED state.

Source code in src/fire_prox/base_fire_collection.py
86
87
88
89
90
91
92
93
def new(self) -> Any:
    """Return a fresh document proxy in DETACHED state (no doc ref yet)."""
    extra_kwargs = self._get_new_kwargs()
    return self._instantiate_object(
        doc_ref=None,
        initial_state=State.DETACHED,
        parent_collection=self,
        **extra_kwargs,
    )

on_snapshot(callback)

Listen for real-time updates to this collection.

This method sets up a real-time listener that fires the callback whenever any document in the collection changes. The listener runs on a separate thread managed by the Firestore SDK.

Important: This is a sync-only feature. Even for AsyncFireCollection instances, the listener uses the synchronous client (via _sync_client) to run on a background thread. This is the standard Firestore pattern for real-time listeners in Python.

Args: callback: Callback function invoked on collection changes. Signature: callback(col_snapshot, changes, read_time) - col_snapshot: List of DocumentSnapshot objects - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED) - read_time: Timestamp of the snapshot

Returns: Watch object with an .unsubscribe() method to stop listening.

Example: import threading

callback_done = threading.Event()

def on_change(col_snapshot, changes, read_time):
    for change in changes:
        if change.type.name == 'ADDED':
            print(f"New document: {change.document.id}")
        elif change.type.name == 'MODIFIED':
            print(f"Modified document: {change.document.id}")
        elif change.type.name == 'REMOVED':
            print(f"Removed document: {change.document.id}")
    callback_done.set()

# Start listening to a collection
users = db.collection('users')
watch = users.on_snapshot(on_change)

# Wait for initial snapshot
callback_done.wait()

# Later: stop listening
watch.unsubscribe()

Note: The callback runs on a separate thread. Use threading primitives (Event, Lock, Queue) for synchronization with your main thread.

Source code in src/fire_prox/base_fire_collection.py
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
def on_snapshot(self, callback: Any) -> Any:
    """
    Attach a real-time listener to this collection.

    The Firestore SDK invokes ``callback`` on a background thread every
    time any document in the collection changes.

    **Important**: Listening is a sync-only feature. Async collections
    fall back to the synchronous client (``_sync_client``) so the watch
    can run on a background thread; this is the standard Firestore
    pattern for real-time listeners in Python.

    Args:
        callback: Function with signature
            ``callback(col_snapshot, changes, read_time)`` where
            - col_snapshot: list of DocumentSnapshot objects
            - changes: list of DocumentChange objects (ADDED, MODIFIED, REMOVED)
            - read_time: timestamp of the snapshot

    Returns:
        Watch object exposing ``.unsubscribe()`` to stop listening.

    Example:
        import threading

        callback_done = threading.Event()

        def on_change(col_snapshot, changes, read_time):
            for change in changes:
                if change.type.name == 'ADDED':
                    print(f"New document: {change.document.id}")
                elif change.type.name == 'MODIFIED':
                    print(f"Modified document: {change.document.id}")
                elif change.type.name == 'REMOVED':
                    print(f"Removed document: {change.document.id}")
            callback_done.set()

        # Start listening to a collection
        users = db.collection('users')
        watch = users.on_snapshot(on_change)

        # Wait for initial snapshot
        callback_done.wait()

        # Later: stop listening
        watch.unsubscribe()

    Note:
        Because the callback runs on another thread, synchronize with your
        main thread using threading primitives (Event, Lock, Queue).
    """
    # Async collections carry a _sync_client; use it to build a sync ref.
    sync_client = getattr(self, '_sync_client', None)
    if sync_client is not None:
        target_ref = sync_client.collection(self.path)
    else:
        # Sync collection: the stored reference is already usable.
        target_ref = self._collection_ref

    # Hand the callback to the native watch machinery.
    return target_ref.on_snapshot(callback)

transaction()

Create a transaction for atomic read-modify-write operations.

Convenience method for creating transactions directly from a collection reference, eliminating the need to access the root FireProx client.

Returns: A native google.cloud.firestore.Transaction or google.cloud.firestore.AsyncTransaction instance.

Example: users = db.collection('users') transaction = users.transaction()

@firestore.transactional
def update_user(transaction, user_id):
    user = users.doc(user_id)
    user.fetch(transaction=transaction)
    user.visits += 1
    user.save(transaction=transaction)

update_user(transaction, 'alice')
Source code in src/fire_prox/base_fire_collection.py
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
def transaction(self) -> Any:
    """
    Create a transaction via the parent FireProx client.

    Convenience shortcut so callers holding only a collection reference
    can start a transaction without reaching for the root client.

    Returns:
        A native google.cloud.firestore.Transaction or
        google.cloud.firestore.AsyncTransaction instance.

    Example:
        users = db.collection('users')
        transaction = users.transaction()

        @firestore.transactional
        def update_user(transaction, user_id):
            user = users.doc(user_id)
            user.fetch(transaction=transaction)
            user.visits += 1
            user.save(transaction=transaction)

        update_user(transaction, 'alice')
    """
    # Delegate straight to the parent client.
    return self._client.transaction()

base_fire_object

BaseFireObject: Shared logic for sync and async FireObject implementations.

This module contains the base class that implements all logic that is identical between synchronous and asynchronous FireObject implementations.

BaseFireObject

Base class for FireObject implementations (sync and async).

Contains all shared logic: - State management - State inspection methods - Dirty tracking - Data dictionary management - Property accessors - String representations

Subclasses must implement: - fetch() - with appropriate sync/async signature - save() - with appropriate sync/async signature - delete() - with appropriate sync/async signature - __getattr__() - may need async support for lazy loading

Source code in src/fire_prox/base_fire_object.py
  18
  19
  20
  21
  22
  23
  24
  25
  26
  27
  28
  29
  30
  31
  32
  33
  34
  35
  36
  37
  38
  39
  40
  41
  42
  43
  44
  45
  46
  47
  48
  49
  50
  51
  52
  53
  54
  55
  56
  57
  58
  59
  60
  61
  62
  63
  64
  65
  66
  67
  68
  69
  70
  71
  72
  73
  74
  75
  76
  77
  78
  79
  80
  81
  82
  83
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
class BaseFireObject:
    """
    Base class for FireObject implementations (sync and async).

    Contains all shared logic:
    - State management
    - State inspection methods
    - Dirty tracking
    - Data dictionary management
    - Property accessors
    - String representations

    Subclasses must implement:
    - fetch() - with appropriate sync/async signature
    - save() - with appropriate sync/async signature
    - delete() - with appropriate sync/async signature
    - __getattr__() - may need async support for lazy loading
    """

    # Class-level constants for internal attribute names
    _INTERNAL_ATTRS = {
        '_doc_ref', '_sync_doc_ref', '_sync_client', '_data', '_state', '_dirty_fields',
        '_deleted_fields', '_atomic_ops', '_parent_collection', '_client', '_id', '_path'
    }

    def __init__(
        self,
        doc_ref: Optional[DocumentReference] = None,
        initial_state: Optional[State] = None,
        parent_collection: Optional[Any] = None,
        sync_doc_ref: Optional[DocumentReference] = None,
        sync_client: Optional[Any] = None
    ):
        """
        Initialize a FireObject.

        Args:
            doc_ref: DocumentReference from the native client, if any.
            initial_state: Explicit starting state. When omitted, the state
                is inferred: DETACHED without a doc_ref, ATTACHED with one.
            parent_collection: Parent FireCollection; needed for save() on
                DETACHED objects.
            sync_doc_ref: Sync DocumentReference (async lazy loading).
            sync_client: Sync Firestore Client (async subcollections).
        """
        # Infer the state up front when the caller did not supply one.
        if initial_state is None:
            initial_state = State.DETACHED if doc_ref is None else State.ATTACHED

        # Every internal attribute is written with object.__setattr__ so the
        # proxy's own __setattr__ logic is bypassed during construction.
        set_attr = object.__setattr__
        set_attr(self, '_doc_ref', doc_ref)
        set_attr(self, '_sync_doc_ref', sync_doc_ref)
        set_attr(self, '_sync_client', sync_client)
        set_attr(self, '_data', {})
        set_attr(self, '_parent_collection', parent_collection)
        set_attr(self, '_state', initial_state)

        # Field-level dirty tracking (Phase 2): names modified or deleted
        # since the last save/fetch.
        set_attr(self, '_dirty_fields', set())
        set_attr(self, '_deleted_fields', set())

        # Atomic operations tracking (Phase 2): pending ArrayUnion /
        # ArrayRemove / Increment ops, keyed by field, applied on save.
        set_attr(self, '_atomic_ops', {})

    # =========================================================================
    # Firestore I/O Hooks (to be implemented by subclasses)
    # =========================================================================

    def _get_snapshot(self, transaction: Optional[Any] = None) -> Any:
        """Retrieve the latest document snapshot from Firestore.

        I/O hook; the sync and async subclasses supply the real read.

        Args:
            transaction: Optional transaction to perform the read within.
        """
        raise NotImplementedError

    def _create_document(self, doc_id: Optional[str] = None) -> Any:
        """Create a new document reference for DETACHED objects.

        I/O hook implemented by subclasses.

        Args:
            doc_id: Optional explicit document ID for the new reference.
        """
        raise NotImplementedError

    def _write_set(
        self,
        data: Dict[str, Any],
        doc_ref: Optional[Any] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> Any:
        """Persist data via a set/overwrite operation.

        I/O hook implemented by subclasses.

        Args:
            data: Full document payload to write.
            doc_ref: Optional target reference override.
            transaction: Optional transaction to write within.
            batch: Optional write batch to accumulate into.
        """
        raise NotImplementedError

    def _write_update(
        self,
        update_dict: Dict[str, Any],
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> Any:
        """Persist data via an update/patch operation.

        I/O hook implemented by subclasses.

        Args:
            update_dict: Field -> value mapping; may contain DELETE_FIELD
                sentinels or atomic operations (see _build_update_dict).
            transaction: Optional transaction to write within.
            batch: Optional write batch to accumulate into.
        """
        raise NotImplementedError

    def _write_delete(self, batch: Optional[Any] = None) -> Any:
        """Delete the document from Firestore.

        I/O hook implemented by subclasses.

        Args:
            batch: Optional write batch to accumulate into.
        """
        raise NotImplementedError

    # =========================================================================
    # Shared lifecycle helpers
    # =========================================================================

    def _should_skip_fetch(self, force: bool) -> bool:
        """Decide whether fetch() may be skipped in the current state.

        Runs the DETACHED/DELETED validators first; after that, a fetch is
        redundant only when the object is already LOADED and ``force`` is
        False.
        """
        self._validate_not_detached("fetch()")
        self._validate_not_deleted("fetch()")
        if force:
            return False
        return self._state == State.LOADED

    def _process_snapshot(self, snapshot: DocumentSnapshot, *, is_async: bool) -> None:
        """Load this object's local data from a Firestore snapshot.

        Converts every stored value for retrieval and transitions the
        object to LOADED.

        Args:
            snapshot: Snapshot returned by the Firestore client.
            is_async: Whether the owning object is the async flavor.

        Raises:
            NotFound: If the snapshot reports the document as missing.
        """
        if not snapshot.exists:
            raise NotFound(f"Document {self._doc_ref.path} does not exist")

        raw = snapshot.to_dict() or {}

        # Async objects need a sync client so nested references can be
        # materialized lazily later on.
        sync_client: Optional[Any] = None
        if is_async:
            sync_doc_ref = getattr(self, '_sync_doc_ref', None)
            if sync_doc_ref is not None:
                sync_client = sync_doc_ref._client
            elif getattr(self, '_sync_client', None) is not None:
                sync_client = self._sync_client

        converted = {
            key: self._convert_snapshot_value_for_retrieval(value, is_async, sync_client)
            for key, value in raw.items()
        }

        self._transition_to_loaded(converted)

    def _prepare_detached_save(
        self,
        doc_id: Optional[str],
        transaction: Optional[Any],
        batch: Optional[Any],
    ) -> tuple[Any, Dict[str, Any]]:
        """Validate preconditions and build the payload for a DETACHED save.

        New documents cannot be created inside transactions or batches, so
        both are rejected up front; otherwise a fresh document reference is
        allocated and the in-memory data converted for storage.

        Returns:
            Tuple of (document reference, storage-ready data dict).

        Raises:
            ValueError: If a transaction or batch was supplied.
        """
        if transaction is not None:
            raise ValueError(
                "Cannot create new documents (DETACHED -> LOADED) within a transaction. "
                "Create the document first, then use transactions for updates."
            )
        if batch is not None:
            raise ValueError(
                "Cannot create new documents (DETACHED -> LOADED) within a batch. "
                "Create the document first, then use batches for updates."
            )

        # Reference allocation happens before payload conversion.
        return self._create_document(doc_id), self._prepare_data_for_storage()

    def _build_update_dict(self) -> Dict[str, Any]:
        """Assemble the partial-update payload for a LOADED save.

        Merges three sources, in order: directly-modified fields (converted
        for storage), deleted fields (DELETE_FIELD sentinel), and pending
        atomic operations. Later sources win on a key collision.
        """
        payload: Dict[str, Any] = {
            field: self._convert_value_for_storage(self._data[field])
            for field in self._dirty_fields
        }
        payload.update((field, firestore.DELETE_FIELD) for field in self._deleted_fields)
        payload.update(self._atomic_ops)
        return payload

    def _get_sync_client_for_async(self) -> Optional[Any]:
        """Locate a sync client usable for materializing async values.

        Prefers the client behind ``_sync_doc_ref``; falls back to
        ``_sync_client`` (or None when neither is present).
        """
        sync_doc_ref = getattr(self, '_sync_doc_ref', None)
        if sync_doc_ref is not None:
            return sync_doc_ref._client
        return getattr(self, '_sync_client', None)

    def _materialize_value(
        self,
        value: Any,
        *,
        is_async: bool,
        sync_client: Optional[Any],
    ) -> Any:
        """Convert stored values into user-facing objects on demand.

        Recursively walks lists and dicts, converting any
        DocumentReference/AsyncDocumentReference leaves via
        _convert_snapshot_value_for_retrieval. Containers are rebuilt only
        when some nested item actually changed (checked by object
        identity), so untouched values keep their original identity.

        Args:
            value: The stored value to materialize.
            is_async: Whether the owning object is the async flavor.
            sync_client: Sync client used for async materialization, if any.
        """
        if isinstance(value, (DocumentReference, AsyncDocumentReference)):
            return self._convert_snapshot_value_for_retrieval(value, is_async, sync_client)

        if isinstance(value, list):
            changed = False
            converted_items = []
            for item in value:
                converted_item = self._materialize_value(
                    item,
                    is_async=is_async,
                    sync_client=sync_client,
                )
                # Identity check: conversion returns the same object when
                # nothing needed converting.
                if converted_item is not item:
                    changed = True
                converted_items.append(converted_item)

            # Preserve the original list object when nothing changed.
            return converted_items if changed else value

        if isinstance(value, dict):
            changed = False
            converted_dict: Dict[str, Any] = {}
            for key, item in value.items():
                converted_item = self._materialize_value(
                    item,
                    is_async=is_async,
                    sync_client=sync_client,
                )
                if converted_item is not item:
                    changed = True
                converted_dict[key] = converted_item

            # Preserve the original dict object when nothing changed.
            return converted_dict if changed else value

        # Scalars and unrecognized types pass through untouched.
        return value

    def _materialize_field(self, name: str) -> Any:
        """Fetch a field from the local data dict, materializing lazily.

        Nested document references are converted to user-facing objects on
        first access, and the converted value is cached back into ``_data``
        so later accesses skip the conversion.

        Raises:
            AttributeError: If the field is not present in the local data.
        """
        if name not in self._data:
            raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")

        is_async = self._is_async_context()
        sync_client = self._get_sync_client_for_async() if is_async else None
        current = self._data[name]
        materialized = self._materialize_value(
            current, is_async=is_async, sync_client=sync_client
        )

        # Cache the converted value only when something actually changed.
        if materialized is not current:
            self._data[name] = materialized

        return self._data[name]

    def _prepare_delete(self) -> None:
        """Validate delete() preconditions before any I/O happens.

        Runs the DETACHED and DELETED state validators so subclasses can
        call this once at the top of their delete() implementations.
        """
        self._validate_not_detached("delete()")
        self._validate_not_deleted("delete()")

    # =========================================================================
    # State Inspection (SHARED)
    # =========================================================================

    @property
    def state(self) -> State:
        """Get the current lifecycle State of the object."""
        return self._state

    def is_detached(self) -> bool:
        """Check if object is in DETACHED state (no document reference)."""
        return self._state == State.DETACHED

    def is_attached(self) -> bool:
        """Check if object has a DocumentReference (ATTACHED or LOADED)."""
        return self._state in (State.ATTACHED, State.LOADED)

    def is_loaded(self) -> bool:
        """Check if object is in LOADED state (data fetched into memory)."""
        return self._state == State.LOADED

    def is_deleted(self) -> bool:
        """Check if object is in DELETED state."""
        return self._state == State.DELETED

    def is_dirty(self) -> bool:
        """Check if object has unsaved changes.

        True when any field was modified or deleted locally, or an atomic
        operation is pending. DETACHED objects are always considered dirty
        since nothing has been persisted yet.
        """
        if self._state == State.DETACHED:
            return True  # DETACHED is always dirty
        return (len(self._dirty_fields) > 0 or
                len(self._deleted_fields) > 0 or
                len(self._atomic_ops) > 0)

    @property
    def dirty_fields(self) -> Set[str]:
        """Get a copy of the set of modified field names (Phase 2)."""
        # Copy so callers cannot mutate the internal tracking set.
        return self._dirty_fields.copy()

    @property
    def deleted_fields(self) -> Set[str]:
        """Get a copy of the set of deleted field names (Phase 2)."""
        # Copy so callers cannot mutate the internal tracking set.
        return self._deleted_fields.copy()

    # =========================================================================
    # Document Identity (SHARED)
    # =========================================================================

    @property
    def id(self) -> Optional[str]:
        """Get document ID, or None if DETACHED (no reference yet)."""
        return self._doc_ref.id if self._doc_ref else None

    @property
    def path(self) -> Optional[str]:
        """Get full document path, or None if DETACHED (no reference yet)."""
        return self._doc_ref.path if self._doc_ref else None

    # =========================================================================
    # Transaction Support (Phase 2)
    # =========================================================================

    def transaction(self) -> Any:
        """
        Create a transaction for atomic read-modify-write operations.

        Convenience shortcut: the native client is reached through this
        document's reference, so callers do not need the root FireProx
        client.

        Returns:
            A native google.cloud.firestore.Transaction or
            google.cloud.firestore.AsyncTransaction instance.

        Raises:
            ValueError: If called on a DETACHED object (no document path yet).

        Example:
            user = db.doc('users/alice')
            transaction = user.transaction()

            @firestore.transactional
            def update_credits(transaction):
                user.fetch(transaction=transaction)
                user.credits += 10
                user.save(transaction=transaction)

            update_credits(transaction)
        """
        self._validate_not_detached("transaction()")

        # The document reference carries the underlying native client.
        client = self._doc_ref._client
        return client.transaction()

    def batch(self) -> Any:
        """
        Create a batch for accumulating multiple write operations.

        Convenience shortcut: the native client is reached through this
        document's reference, so callers do not need the root FireProx
        client.

        Returns:
            A native google.cloud.firestore.WriteBatch or
            google.cloud.firestore.AsyncWriteBatch instance.

        Raises:
            ValueError: If called on a DETACHED object (no document path yet).

        Example:
            user = db.doc('users/alice')
            batch = user.batch()

            user.credits = 100
            user.save(batch=batch)

            other_user = db.doc('users/bob')
            other_user.delete(batch=batch, recursive=False)

            batch.commit()  # all operations applied atomically

        Note:
            See BaseFireProx.batch() for detailed documentation on batch
            operations.
        """
        self._validate_not_detached("batch()")

        # The document reference carries the underlying native client.
        client = self._doc_ref._client
        return client.batch()

    # =========================================================================
    # Subcollections (Phase 2)
    # =========================================================================

    def collection(self, name: str) -> Any:
        """
        Get a subcollection proxy rooted at this document.

        Phase 2 feature enabling hierarchical data structures: returns a
        collection wrapper for the subcollection ``name`` under this
        document's path.

        Args:
            name: Name of the subcollection.

        Returns:
            FireCollection or AsyncFireCollection instance for the
            subcollection.

        Raises:
            ValueError: If called on a DETACHED object (no document path yet).
            RuntimeError: If called on a DELETED object.

        Example:
            user = db.doc('users/alovelace')
            posts = user.collection('posts')  # Gets 'users/alovelace/posts'
            new_post = posts.new()
            new_post.title = "On Analytical Engines"
            new_post.save()
        """
        self._validate_not_detached("collection()")
        self._validate_not_deleted("collection()")

        subcollection_ref = self._doc_ref.collection(name)

        # Imported lazily to avoid a circular dependency at module load.
        from .async_fire_collection import AsyncFireCollection
        from .fire_collection import FireCollection

        # Heuristic: async document reference classes carry 'Async' in
        # their class name; pick the matching wrapper flavor.
        if 'Async' in type(self._doc_ref).__name__:
            return AsyncFireCollection(
                subcollection_ref,
                client=None,  # Will be inferred from ref
                sync_client=getattr(self, '_sync_client', None),
            )
        return FireCollection(subcollection_ref, client=None)

    # =========================================================================
    # Atomic Operations (Phase 2)
    # =========================================================================

    def array_union(self, field: str, values: list) -> None:
        """
        Mark field for ArrayUnion operation and simulate locally.

        Phase 2 feature. ArrayUnion adds elements to an array field without
        reading the document first. If the array doesn't exist, it creates
        it. Firestore deduplicates values server-side, and the local
        simulation mirrors that exactly: values already in the array are
        skipped, and so are duplicates *within* ``values`` itself.

        The operation is simulated locally, so the array is immediately
        updated in memory. This eliminates the need to call fetch() after
        save().

        Mutual Exclusivity: A field can be either modified directly
        (vanilla) OR via atomic operations, but not both. Once
        array_union() is called on a field, you cannot modify that field
        directly until after save().

        Args:
            field: The field name to apply ArrayUnion to.
            values: List of values to add to the array.

        Raises:
            RuntimeError: If called on a DELETED object.
            ValueError: If the field has been modified directly (is dirty).

        Note:
            Calling array_union() again on the same field before save()
            replaces the pending server-side operation with the new values.

        Example:
            user = db.doc('users/ada')
            user.array_union('tags', ['python', 'firestore'])
            user.save()  # No fetch() needed - local state is already updated!
        """
        self._validate_not_deleted("array_union()")

        # Mutual exclusivity: refuse atomic ops on directly-modified fields.
        if field in self._dirty_fields:
            raise ValueError(
                f"Cannot perform atomic array_union on field '{field}' - "
                f"field has been modified directly. Save changes first or use atomic operations exclusively."
            )

        # Simulate locally with full deduplication. Bug fix: the previous
        # implementation only filtered against the existing array, so
        # duplicates inside `values` were appended twice, diverging from
        # the server-side ArrayUnion result.
        updated_array = list(self._data.get(field, []))
        for value in values:
            if value not in updated_array:
                updated_array.append(value)
        self._data[field] = updated_array

        # Store the operation for server-side execution on save().
        from google.cloud import firestore
        self._atomic_ops[field] = firestore.ArrayUnion(values)

    def array_remove(self, field: str, values: list) -> None:
        """
        Mark field for ArrayRemove operation and simulate locally.

        Phase 2 feature. ArrayRemove removes specified elements from an array
        field without reading the document first.

        The operation is simulated locally, so the array is immediately
        updated in memory. This eliminates the need to call fetch() after save().
        Repeated calls on the same field are merged so the queued server-side
        operation stays consistent with the locally simulated state.

        Mutual Exclusivity: A field can be either modified directly (vanilla) OR
        via atomic operations, but not both. Once array_remove() is called on a field,
        you cannot modify that field directly until after save().

        Args:
            field: The field name to apply ArrayRemove to.
            values: List of values to remove from the array.

        Raises:
            RuntimeError: If called on a DELETED object.
            ValueError: If the field has been modified directly (is dirty).

        Example:
            user = db.doc('users/ada')
            user.array_remove('tags', ['deprecated'])
            user.save()  # No fetch() needed - local state is already updated!
        """
        self._validate_not_deleted("array_remove()")

        # Validate field is not dirty (mutual exclusivity)
        if field in self._dirty_fields:
            raise ValueError(
                f"Cannot perform atomic array_remove on field '{field}' - "
                f"field has been modified directly. Save changes first or use atomic operations exclusively."
            )

        # Simulate locally: filter out values to remove
        current_array = self._data.get(field, [])
        updated_array = [item for item in current_array if item not in values]
        self._data[field] = updated_array

        # Store the operation for server-side execution. Merge with any
        # previously queued ArrayRemove on this field so repeated calls
        # accumulate instead of overwriting each other (previously only the
        # last call's values reached the server, diverging from the local
        # simulation above).
        from google.cloud import firestore
        pending = self._atomic_ops.get(field)
        if isinstance(pending, firestore.ArrayRemove):
            values = list(pending.values) + [v for v in values if v not in pending.values]
        self._atomic_ops[field] = firestore.ArrayRemove(values)

    def increment(self, field: str, value: float) -> None:
        """
        Mark field for Increment operation and simulate locally.

        Phase 2 feature. Increment atomically increments a numeric field by the
        given value without reading the document first. If the field doesn't
        exist, it treats it as 0.

        The operation is simulated locally, so the field value is immediately
        updated in memory. This eliminates the need to call fetch() after save().
        Repeated calls on the same field are summed so the queued server-side
        operation stays consistent with the locally simulated state.

        Mutual Exclusivity: A field can be either modified directly (vanilla) OR
        via atomic operations, but not both. Once increment() is called on a field,
        you cannot modify that field directly until after save().

        Args:
            field: The field name to increment.
            value: The amount to increment by (can be negative to decrement).

        Raises:
            RuntimeError: If called on a DELETED object.
            ValueError: If the field has been modified directly (is dirty).

        Example:
            user = db.doc('users/ada')
            user.increment('view_count', 1)
            user.increment('score', -5)  # Decrement by 5
            user.save()  # No fetch() needed - local state is already updated!
        """
        self._validate_not_deleted("increment()")

        # Validate field is not dirty (mutual exclusivity)
        if field in self._dirty_fields:
            raise ValueError(
                f"Cannot perform atomic increment on field '{field}' - "
                f"field has been modified directly. Save changes first or use atomic operations exclusively."
            )

        # Simulate locally: get current value (default to 0) and add increment
        current_value = self._data.get(field, 0)
        self._data[field] = current_value + value

        # Store the operation for server-side execution. Sum with any
        # previously queued Increment on this field so repeated calls
        # accumulate instead of overwriting each other (previously only the
        # last call's delta reached the server, diverging from the local
        # simulation above).
        from google.cloud import firestore
        pending = self._atomic_ops.get(field)
        if isinstance(pending, firestore.Increment):
            value = pending.value + value
        self._atomic_ops[field] = firestore.Increment(value)

    # =========================================================================
    # Attribute Handling (SHARED - but __getattr__ may need override)
    # =========================================================================

    def __setattr__(self, name: str, value: Any) -> None:
        """
        Store attribute in _data dictionary and track in dirty fields.

        Internal attributes (those listed in _INTERNAL_ATTRS) are stored
        directly on the object and never enter _data.

        Phase 2: Track field-level changes for efficient partial updates.
        Enforces mutual exclusivity between vanilla and atomic operations.

        Args:
            name: Attribute/field name being assigned.
            value: Value to store; special types (e.g. FireObject) are
                converted to Firestore-compatible forms before storage.

        Raises:
            AttributeError: If the object is in the DELETED state.
            ValueError: If the field has a pending atomic operation.
        """
        # Internal attributes bypass _data storage
        if name in self._INTERNAL_ATTRS:
            object.__setattr__(self, name, value)
            return

        # Cannot modify DELETED objects. The hasattr guard matters during
        # __init__, when _state has not been set yet.
        if hasattr(self, '_state') and self._state == State.DELETED:
            raise AttributeError("Cannot modify a DELETED FireObject")

        # Initialize phase - before _data exists, fall back to plain
        # attribute assignment so construction can proceed.
        if not hasattr(self, '_data'):
            object.__setattr__(self, name, value)
        else:
            # Enforce mutual exclusivity: cannot modify field with pending atomic operation
            if hasattr(self, '_atomic_ops') and name in self._atomic_ops:
                raise ValueError(
                    f"Cannot modify field '{name}' directly - "
                    "field has a pending atomic operation. Save changes first "
                    "or use vanilla modifications exclusively."
                )

            # Convert special types for storage (FireObject → DocumentReference, etc.)
            value = self._convert_value_for_storage(value)

            # Store in _data and track in dirty fields
            self._data[name] = value
            self._dirty_fields.add(name)
            # If this field was marked for deletion, remove it from deleted set
            self._deleted_fields.discard(name)

    def __delattr__(self, name: str) -> None:
        """
        Delete a document field, recording it for a partial-update removal.

        Phase 2: Deleted fields are tracked so save() can issue efficient
        partial updates with DELETE_FIELD.
        """
        if self._state == State.DELETED:
            raise AttributeError("Cannot delete attributes from a DELETED FireObject")

        try:
            del self._data[name]
        except KeyError:
            raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") from None

        # A removed field is no longer dirty; it is pending deletion instead.
        self._dirty_fields.discard(name)
        self._deleted_fields.add(name)

    # =========================================================================
    # Utility Methods (SHARED)
    # =========================================================================

    def to_dict(self) -> Dict[str, Any]:
        """
        Return a shallow copy of the document's field data.

        Returns:
            Dictionary containing all document fields.

        Raises:
            RuntimeError: If object is in ATTACHED state (data not loaded).
        """
        # ATTACHED means we have a reference but no data yet.
        if self._state == State.ATTACHED:
            raise RuntimeError("Cannot call to_dict() on ATTACHED FireObject. Call fetch() first.")
        return {**self._data}

    def __repr__(self) -> str:
        """Return a detailed, state-aware representation for debugging."""
        cls_name = type(self).__name__
        if self._state == State.DETACHED:
            return f"<{cls_name} DETACHED dirty_fields={len(self._dirty_fields)}>"
        pending = len(self._dirty_fields) + len(self._deleted_fields)
        return f"<{cls_name} {self._state.name} path='{self.path}' dirty_fields={pending}>"

    def __str__(self) -> str:
        """Return a concise, human-readable representation."""
        cls_name = type(self).__name__
        return f"{cls_name}(detached)" if self._state == State.DETACHED else f"{cls_name}({self.path})"

    # =========================================================================
    # Protected Helper Methods (SHARED)
    # =========================================================================

    def _validate_not_deleted(self, operation: str) -> None:
        """
        Guard against operating on a deleted document proxy.

        Args:
            operation: Name of the operation being attempted (used in the
                error message).

        Raises:
            RuntimeError: If object is DELETED.
        """
        if self._state != State.DELETED:
            return
        raise RuntimeError(f"Cannot {operation} on a DELETED FireObject")

    def _validate_not_detached(self, operation: str) -> None:
        """
        Guard against operating on a proxy with no document reference yet.

        Args:
            operation: Name of the operation being attempted (used in the
                error message).

        Raises:
            ValueError: If object is DETACHED.
        """
        if self._state != State.DETACHED:
            return
        raise ValueError(f"Cannot {operation} on a DETACHED FireObject (no DocumentReference)")

    def _mark_clean(self) -> None:
        """Reset all pending-change tracking (dirty, deleted, atomic ops)."""
        for tracker in (self._dirty_fields, self._deleted_fields, self._atomic_ops):
            tracker.clear()

    def _prepare_data_for_storage(self) -> Dict[str, Any]:
        """
        Build a Firestore-ready copy of the internal data.

        Any FireObjects cached in _data (e.g. by __getattr__) are converted
        back to DocumentReferences.

        Returns:
            Dictionary with all values converted to Firestore-compatible types.
        """
        return {
            key: self._convert_value_for_storage(value)
            for key, value in self._data.items()
        }

    def _mark_dirty(self) -> None:
        """Flag every current field as modified.

        Note: In Phase 2 this is a fallback for when the specific changed
        fields are unknown; prefer per-field tracking where possible.
        """
        # Iterating the dict yields its keys.
        self._dirty_fields |= set(self._data)

    def _transition_to_loaded(self, data: Dict[str, Any]) -> None:
        """
        Enter the LOADED state, replacing local data with the given dict.

        Args:
            data: Document data dictionary.
        """
        # Bypass __setattr__ so these writes are not treated as field edits.
        object.__setattr__(self, '_data', data)
        object.__setattr__(self, '_state', State.LOADED)
        # Freshly loaded data carries no pending changes.
        self._mark_clean()

    def _transition_to_deleted(self) -> None:
        """Move this object into the DELETED lifecycle state."""
        # Bypass __setattr__, which rejects writes on DELETED objects.
        object.__setattr__(self, '_state', State.DELETED)

    # =========================================================================
    # Real-Time Listeners (Sync-only via _sync_doc_ref or _doc_ref)
    # =========================================================================

    def on_snapshot(self, callback: Any) -> Any:
        """
        Listen for real-time updates to this document.

        Registers a real-time listener that invokes ``callback`` whenever
        the document changes in Firestore. The Firestore SDK runs the
        listener on its own background thread.

        **Important**: This is a sync-only feature. Even for AsyncFireObject
        instances, the listener goes through the synchronous client (via
        _sync_doc_ref) on a background thread — the standard Firestore
        pattern for real-time listeners in Python.

        Args:
            callback: Callback function invoked on document changes.
                     Signature: callback(doc_snapshot, changes, read_time)
                     - doc_snapshot: List of DocumentSnapshot objects
                     - changes: List of DocumentChange objects
                     - read_time: Timestamp of the snapshot

        Returns:
            Watch object with an `.unsubscribe()` method to stop listening.

        Raises:
            ValueError: If called on a DETACHED object (no document path).
            RuntimeError: If called on a DELETED object.

        Example:
            import threading

            callback_done = threading.Event()

            def on_change(doc_snapshot, changes, read_time):
                for doc in doc_snapshot:
                    print(f"Document updated: {doc.to_dict()}")
                callback_done.set()

            user = db.doc('users/alice')
            watch = user.on_snapshot(on_change)

            callback_done.wait()          # wait for initial snapshot
            watch.unsubscribe()           # later: stop listening

        Note:
            The callback runs on a separate thread. Use threading primitives
            (Event, Lock, Queue) for synchronization with your main thread.
        """
        self._validate_not_detached("on_snapshot()")
        self._validate_not_deleted("on_snapshot()")

        # AsyncFireObject carries a sync doc ref for listeners; a plain
        # FireObject simply uses its own reference.
        sync_ref = getattr(self, '_sync_doc_ref', None)
        listen_ref = self._doc_ref if sync_ref is None else sync_ref

        # Register the listener and hand back the Watch handle.
        return listen_ref.on_snapshot(callback)

    def _is_async_context(self) -> bool:
        """
        Report whether this object belongs to the async API surface.

        Returns:
            True if this is an AsyncFireObject, False if sync FireObject.

        Example:
            if self._is_async_context():
                # Use async patterns
            else:
                # Use sync patterns
        """
        # Prefer the document reference's class name; fall back to our own
        # class name when no reference is attached (DETACHED objects).
        probe = self if self._doc_ref is None else self._doc_ref
        return 'Async' in type(probe).__name__

    def _convert_value_for_storage(self, value: Any) -> Any:
        """
        Recursively convert a Python value into a Firestore-storable one.

        Conversions performed:
        - FireObject/AsyncFireObject → DocumentReference
        - DocumentReference → passed through unchanged (raw refs allowed)
        - list → each item converted recursively
        - dict → each value converted recursively
        - anything else → passed through unchanged

        Args:
            value: The value to convert.

        Returns:
            The converted value ready for Firestore storage.

        Raises:
            ValueError: If trying to store a DETACHED FireObject.
            TypeError: If trying to mix sync and async FireObjects.

        Example:
            # Assign a FireObject reference
            post.author = user  # user is a FireObject
            # Internally converts to DocumentReference
        """
        if isinstance(value, BaseFireObject):
            # A DETACHED object has no document path to reference yet.
            if value._state == State.DETACHED:
                raise ValueError(
                    "Cannot assign a DETACHED FireObject as a reference. "
                    "The object must be saved first to have a document path."
                )

            # Sync objects may only reference sync objects, async only async.
            is_async = self._is_async_context()
            value_is_async = value._is_async_context()
            if is_async != value_is_async:
                raise TypeError(
                    f"Cannot assign {'async' if value_is_async else 'sync'} FireObject "
                    f"to {'async' if is_async else 'sync'} FireObject. "
                    "Both objects must be from the same context (sync or async)."
                )

            # Store the underlying reference, not the proxy.
            return value._doc_ref

        if isinstance(value, (DocumentReference, AsyncDocumentReference)):
            # Raw references are already storable.
            return value

        if isinstance(value, list):
            convert = self._convert_value_for_storage
            return [convert(item) for item in value]

        if isinstance(value, dict):
            convert = self._convert_value_for_storage
            return {key: convert(item) for key, item in value.items()}

        # Plain values need no conversion.
        return value

    @classmethod
    def _convert_snapshot_value_for_retrieval(
        cls,
        value: Any,
        is_async: bool,
        sync_client: Optional[Any] = None
    ) -> Any:
        """
        Recursively convert a Firestore snapshot value for Python use.

        Conversions performed:
        - DocumentReference → FireObject/AsyncFireObject (ATTACHED state)
        - list → each item converted recursively
        - dict → each value converted recursively

        Args:
            value: The value from Firestore snapshot.
            is_async: Whether to create async or sync FireObjects.
            sync_client: Optional sync Firestore client for async lazy loading.

        Returns:
            The converted value ready for Python use.

        Example:
            # Reading a document with a reference field
            doc.fetch()
            author = doc.author  # Automatically converted to FireObject
        """
        if isinstance(value, (DocumentReference, AsyncDocumentReference)):
            if not is_async:
                from .fire_object import FireObject
                return FireObject(doc_ref=value, initial_state=State.ATTACHED)

            from .async_fire_object import AsyncFireObject
            # Async objects also carry a sync reference so lazy loading can
            # run on the synchronous client.
            if isinstance(value, DocumentReference):
                sync_ref = value                            # already a sync ref
            elif sync_client:
                sync_ref = sync_client.document(value.path)  # derive from async ref
            else:
                sync_ref = None
            return AsyncFireObject(
                doc_ref=value,
                initial_state=State.ATTACHED,
                sync_doc_ref=sync_ref,
                sync_client=sync_client
            )

        if isinstance(value, list):
            return [
                cls._convert_snapshot_value_for_retrieval(item, is_async, sync_client)
                for item in value
            ]

        if isinstance(value, dict):
            return {
                key: cls._convert_snapshot_value_for_retrieval(item, is_async, sync_client)
                for key, item in value.items()
            }

        # Plain values need no conversion.
        return value

    @classmethod
    def _create_from_snapshot_base(
        cls,
        snapshot: DocumentSnapshot,
        parent_collection: Optional[Any] = None,
        sync_client: Optional[Any] = None
    ) -> Dict[str, Any]:
        """
        Build initialization parameters for a FireObject from a snapshot.

        Shared logic backing the from_snapshot() factory methods.

        Args:
            snapshot: DocumentSnapshot from native API.
            parent_collection: Optional parent collection reference.
            sync_client: Optional sync Firestore client for async lazy loading.

        Returns:
            Dictionary with initialization parameters.

        Raises:
            ValueError: If snapshot doesn't exist.
        """
        if not snapshot.exists:
            raise ValueError("Cannot create FireObject from non-existent snapshot")

        raw = snapshot.to_dict() or {}

        # The reference's class name distinguishes sync from async context.
        is_async = 'Async' in type(snapshot.reference).__name__

        # Rehydrate special values (DocumentReference → FireObject, etc.).
        converted = {
            key: cls._convert_snapshot_value_for_retrieval(value, is_async, sync_client)
            for key, value in raw.items()
        }

        return {
            'doc_ref': snapshot.reference,
            'initial_state': State.LOADED,
            'parent_collection': parent_collection,
            'data': converted,
        }

deleted_fields property

Get the set of deleted field names (Phase 2).

dirty_fields property

Get the set of modified field names (Phase 2).

id property

Get document ID, or None if DETACHED.

path property

Get full document path, or None if DETACHED.

state property

Get current state of the object.

__delattr__(name)

Remove field from _data and track in deleted fields.

Phase 2: Track deletions for efficient partial updates with DELETE_FIELD.

Source code in src/fire_prox/base_fire_object.py
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
def __delattr__(self, name: str) -> None:
    """
    Remove field from _data and track in deleted fields.

    Phase 2: Track deletions for efficient partial updates with DELETE_FIELD.
    """
    if self._state == State.DELETED:
        raise AttributeError("Cannot delete attributes from a DELETED FireObject")

    if name not in self._data:
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")

    del self._data[name]
    # Track deletion for partial update
    self._deleted_fields.add(name)
    # Remove from dirty fields if it was there
    self._dirty_fields.discard(name)

__init__(doc_ref=None, initial_state=None, parent_collection=None, sync_doc_ref=None, sync_client=None)

Initialize a FireObject.

Args: doc_ref: Optional DocumentReference from native client. initial_state: Initial state (defaults to DETACHED if no doc_ref, ATTACHED if doc_ref provided). parent_collection: Optional reference to parent FireCollection (needed for save() on DETACHED objects). sync_doc_ref: Optional sync DocumentReference (for async lazy loading). sync_client: Optional sync Firestore Client (for async subcollections).

Source code in src/fire_prox/base_fire_object.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
def __init__(
    self,
    doc_ref: Optional[DocumentReference] = None,
    initial_state: Optional[State] = None,
    parent_collection: Optional[Any] = None,
    sync_doc_ref: Optional[DocumentReference] = None,
    sync_client: Optional[Any] = None
):
    """
    Initialize a FireObject.

    Args:
        doc_ref: Optional DocumentReference from native client.
        initial_state: Initial state (defaults to DETACHED if no doc_ref,
                      ATTACHED if doc_ref provided).
        parent_collection: Optional reference to parent FireCollection
                         (needed for save() on DETACHED objects).
        sync_doc_ref: Optional sync DocumentReference (for async lazy loading).
        sync_client: Optional sync Firestore Client (for async subcollections).
    """
    # Set internal attributes directly to avoid __setattr__ logic
    object.__setattr__(self, '_doc_ref', doc_ref)
    object.__setattr__(self, '_sync_doc_ref', sync_doc_ref)
    object.__setattr__(self, '_sync_client', sync_client)
    object.__setattr__(self, '_data', {})
    object.__setattr__(self, '_parent_collection', parent_collection)

    # Determine initial state
    if initial_state is not None:
        object.__setattr__(self, '_state', initial_state)
    elif doc_ref is None:
        object.__setattr__(self, '_state', State.DETACHED)
    else:
        object.__setattr__(self, '_state', State.ATTACHED)

    # Field-level dirty tracking (Phase 2)
    # Track which fields have been modified or deleted since last save/fetch
    object.__setattr__(self, '_dirty_fields', set())
    object.__setattr__(self, '_deleted_fields', set())

    # Atomic operations tracking (Phase 2)
    # Store atomic operations (ArrayUnion, ArrayRemove, Increment) to apply on save
    object.__setattr__(self, '_atomic_ops', {})

__repr__()

Return detailed string representation.

Source code in src/fire_prox/base_fire_object.py
662
663
664
665
666
667
def __repr__(self) -> str:
    """Return detailed string representation."""
    if self._state == State.DETACHED:
        return f"<{type(self).__name__} DETACHED dirty_fields={len(self._dirty_fields)}>"
    dirty_count = len(self._dirty_fields) + len(self._deleted_fields)
    return f"<{type(self).__name__} {self._state.name} path='{self.path}' dirty_fields={dirty_count}>"

__setattr__(name, value)

Store attribute in _data dictionary and track in dirty fields.

Internal attributes (starting with _) are stored directly on object.

Phase 2: Track field-level changes for efficient partial updates. Enforces mutual exclusivity between vanilla and atomic operations.

Source code in src/fire_prox/base_fire_object.py
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
def __setattr__(self, name: str, value: Any) -> None:
    """
    Store attribute in _data dictionary and track in dirty fields.

    Internal attributes (starting with _) are stored directly on object.

    Phase 2: Track field-level changes for efficient partial updates.
    Enforces mutual exclusivity between vanilla and atomic operations.
    """
    # Internal attributes bypass _data storage
    if name in self._INTERNAL_ATTRS:
        object.__setattr__(self, name, value)
        return

    # Cannot modify DELETED objects
    if hasattr(self, '_state') and self._state == State.DELETED:
        raise AttributeError("Cannot modify a DELETED FireObject")

    # Initialize phase - before _data exists
    if not hasattr(self, '_data'):
        object.__setattr__(self, name, value)
    else:
        # Enforce mutual exclusivity: cannot modify field with pending atomic operation
        if hasattr(self, '_atomic_ops') and name in self._atomic_ops:
            raise ValueError(
                f"Cannot modify field '{name}' directly - "
                "field has a pending atomic operation. Save changes first "
                "or use vanilla modifications exclusively."
            )

        # Convert special types for storage (FireObject → DocumentReference, etc.)
        value = self._convert_value_for_storage(value)

        # Store in _data and track in dirty fields
        self._data[name] = value
        self._dirty_fields.add(name)
        # If this field was marked for deletion, remove it from deleted set
        self._deleted_fields.discard(name)

__str__()

Return human-readable string representation.

Source code in src/fire_prox/base_fire_object.py
669
670
671
672
673
def __str__(self) -> str:
    """Return human-readable string representation."""
    if self._state == State.DETACHED:
        return f"{type(self).__name__}(detached)"
    return f"{type(self).__name__}({self.path})"

array_remove(field, values)

Mark field for ArrayRemove operation and simulate locally.

Phase 2 feature. ArrayRemove removes specified elements from an array field without reading the document first.

The operation is simulated locally, so the array is immediately updated in memory. This eliminates the need to call fetch() after save().

Mutual Exclusivity: A field can be either modified directly (vanilla) OR via atomic operations, but not both. Once array_remove() is called on a field, you cannot modify that field directly until after save().

Args: field: The field name to apply ArrayRemove to. values: List of values to remove from the array.

Raises: RuntimeError: If called on a DELETED object. ValueError: If the field has been modified directly (is dirty).

Example: user = db.doc('users/ada') user.array_remove('tags', ['deprecated']) user.save() # No fetch() needed - local state is already updated!

Source code in src/fire_prox/base_fire_object.py
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
def array_remove(self, field: str, values: list) -> None:
    """
    Queue an atomic ArrayRemove for *field* and mirror the effect locally.

    Phase 2 feature. ArrayRemove strips the given elements out of an
    array field on the server without requiring a prior read.

    Because the removal is also applied to the in-memory copy, callers
    do not need to fetch() again after save().

    Mutual Exclusivity: a field may be changed either directly (vanilla)
    or through atomic operations, never both at once. After calling
    array_remove() on a field, direct assignment to it is blocked until
    the next save().

    Args:
        field: Name of the array field to modify.
        values: Elements to remove from the array.

    Raises:
        RuntimeError: If the object is in the DELETED state.
        ValueError: If the field already has a direct (vanilla) edit.

    Example:
        user = db.doc('users/ada')
        user.array_remove('tags', ['deprecated'])
        user.save()  # local state already reflects the removal
    """
    self._validate_not_deleted("array_remove()")

    # Mutual exclusivity: reject if the field already has a vanilla edit.
    if field in self._dirty_fields:
        raise ValueError(
            f"Cannot perform atomic array_remove on field '{field}' - "
            f"field has been modified directly. Save changes first or use atomic operations exclusively."
        )

    # Local simulation: drop every element that appears in `values`.
    existing = self._data.get(field, [])
    self._data[field] = [element for element in existing if element not in values]

    # Queue the real server-side transform for the next save().
    from google.cloud import firestore
    self._atomic_ops[field] = firestore.ArrayRemove(values)

array_union(field, values)

Mark field for ArrayUnion operation and simulate locally.

Phase 2 feature. ArrayUnion adds elements to an array field without reading the document first. If the array doesn't exist, it creates it. Duplicate values are automatically deduplicated.

The operation is simulated locally, so the array is immediately updated in memory. This eliminates the need to call fetch() after save().

Mutual Exclusivity: A field can be either modified directly (vanilla) OR via atomic operations, but not both. Once array_union() is called on a field, you cannot modify that field directly until after save().

Args: field: The field name to apply ArrayUnion to. values: List of values to add to the array.

Raises: RuntimeError: If called on a DELETED object. ValueError: If the field has been modified directly (is dirty).

Example: user = db.doc('users/ada') user.array_union('tags', ['python', 'firestore']) user.save() # No fetch() needed - local state is already updated!

Source code in src/fire_prox/base_fire_object.py
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
def array_union(self, field: str, values: list) -> None:
    """
    Mark field for ArrayUnion operation and simulate locally.

    Phase 2 feature. ArrayUnion adds elements to an array field without
    reading the document first. If the array doesn't exist, it creates it.
    Duplicate values are automatically deduplicated.

    The operation is simulated locally, so the array is immediately
    updated in memory. This eliminates the need to call fetch() after save().

    Mutual Exclusivity: A field can be either modified directly (vanilla) OR
    via atomic operations, but not both. Once array_union() is called on a field,
    you cannot modify that field directly until after save().

    Args:
        field: The field name to apply ArrayUnion to.
        values: List of values to add to the array.

    Raises:
        RuntimeError: If called on a DELETED object.
        ValueError: If the field has been modified directly (is dirty).

    Example:
        user = db.doc('users/ada')
        user.array_union('tags', ['python', 'firestore'])
        user.save()  # No fetch() needed - local state is already updated!
    """
    self._validate_not_deleted("array_union()")

    # Validate field is not dirty (mutual exclusivity)
    if field in self._dirty_fields:
        raise ValueError(
            f"Cannot perform atomic array_union on field '{field}' - "
            f"field has been modified directly. Save changes first or use atomic operations exclusively."
        )

    # Simulate locally. Server-side ArrayUnion deduplicates both against
    # the stored array AND within `values` itself, so append each candidate
    # only if it is not already present in the merged result. (Checking
    # only against the existing array would let duplicates inside `values`
    # slip into local state and diverge from what the server stores.)
    # Membership uses `in` rather than a set so unhashable values still work.
    merged = list(self._data.get(field, []))
    for candidate in values:
        if candidate not in merged:
            merged.append(candidate)
    self._data[field] = merged

    # Store the operation for server-side execution
    from google.cloud import firestore
    self._atomic_ops[field] = firestore.ArrayUnion(values)

batch()

Create a batch for accumulating multiple write operations.

Convenience method for creating batches directly from a document reference, eliminating the need to access the root FireProx client.

Returns: A native google.cloud.firestore.WriteBatch or google.cloud.firestore.AsyncWriteBatch instance.

Raises: ValueError: If called on a DETACHED object (no document path yet).

Example: user = db.doc('users/alice') batch = user.batch()

# Use the batch for multiple operations
user.credits = 100
user.save(batch=batch)

other_user = db.doc('users/bob')
other_user.delete(batch=batch, recursive=False)

# Commit all operations atomically
batch.commit()

Note: See BaseFireProx.batch() for detailed documentation on batch operations.

Source code in src/fire_prox/base_fire_object.py
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
def batch(self) -> Any:
    """
    Create a write batch from this document's underlying client.

    Convenience wrapper so callers can build batches without holding a
    reference to the root FireProx instance.

    Returns:
        A native google.cloud.firestore.WriteBatch or
        google.cloud.firestore.AsyncWriteBatch instance.

    Raises:
        ValueError: If called on a DETACHED object (no document path yet).

    Example:
        user = db.doc('users/alice')
        batch = user.batch()

        # Accumulate several operations
        user.credits = 100
        user.save(batch=batch)

        other_user = db.doc('users/bob')
        other_user.delete(batch=batch, recursive=False)

        # Commit everything atomically
        batch.commit()

    Note:
        See BaseFireProx.batch() for detailed documentation on batch operations.
    """
    self._validate_not_detached("batch()")

    # Reach through the document reference to its owning client.
    owning_client = self._doc_ref._client
    return owning_client.batch()

collection(name)

Get a subcollection reference for this document.

Phase 2 feature. Returns a collection reference for a subcollection under this document, enabling hierarchical data structures.

Args: name: Name of the subcollection.

Returns: FireCollection or AsyncFireCollection instance for the subcollection.

Raises: ValueError: If called on a DETACHED object (no document path yet). RuntimeError: If called on a DELETED object.

Example: user = db.doc('users/alovelace') posts = user.collection('posts') # Gets 'users/alovelace/posts' new_post = posts.new() new_post.title = "On Analytical Engines" new_post.save()

Source code in src/fire_prox/base_fire_object.py
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
def collection(self, name: str) -> Any:
    """
    Get a subcollection reference for this document.

    Phase 2 feature. Returns a collection reference for a subcollection
    under this document, enabling hierarchical data structures.

    Args:
        name: Name of the subcollection.

    Returns:
        FireCollection or AsyncFireCollection instance for the subcollection.

    Raises:
        ValueError: If called on a DETACHED object (no document path yet).
        RuntimeError: If called on a DELETED object.

    Example:
        user = db.doc('users/alovelace')
        posts = user.collection('posts')  # Gets 'users/alovelace/posts'
        new_post = posts.new()
        new_post.title = "On Analytical Engines"
        new_post.save()
    """
    self._validate_not_detached("collection()")
    self._validate_not_deleted("collection()")

    # Get subcollection reference from document reference
    subcollection_ref = self._doc_ref.collection(name)

    # Import here to avoid circular dependency
    from .async_fire_collection import AsyncFireCollection
    from .fire_collection import FireCollection

    # Dispatch on the underlying reference's type name. Every object has
    # __class__, so the previous `hasattr(self._doc_ref, '__class__')`
    # guard was always true and has been removed.
    if 'Async' in type(self._doc_ref).__name__:
        return AsyncFireCollection(
            subcollection_ref,
            client=None,  # Will be inferred from ref
            # getattr with a default replaces the hasattr/else dance.
            sync_client=getattr(self, '_sync_client', None),
        )
    else:
        return FireCollection(subcollection_ref, client=None)

increment(field, value)

Mark field for Increment operation and simulate locally.

Phase 2 feature. Increment atomically increments a numeric field by the given value without reading the document first. If the field doesn't exist, it treats it as 0.

The operation is simulated locally, so the field value is immediately updated in memory. This eliminates the need to call fetch() after save().

Mutual Exclusivity: A field can be either modified directly (vanilla) OR via atomic operations, but not both. Once increment() is called on a field, you cannot modify that field directly until after save().

Args: field: The field name to increment. value: The amount to increment by (can be negative to decrement).

Raises: RuntimeError: If called on a DELETED object. ValueError: If the field has been modified directly (is dirty).

Example: user = db.doc('users/ada') user.increment('view_count', 1) user.increment('score', -5) # Decrement by 5 user.save() # No fetch() needed - local state is already updated!

Source code in src/fire_prox/base_fire_object.py
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
def increment(self, field: str, value: float) -> None:
    """
    Queue an atomic Increment on *field* and mirror it locally.

    Phase 2 feature. Increment adds `value` to a numeric field on the
    server without a prior read; a missing field is treated as 0.

    The in-memory copy is updated immediately, so no fetch() is needed
    after save().

    Mutual Exclusivity: a field may be changed either directly (vanilla)
    or through atomic operations, never both at once. After calling
    increment() on a field, direct assignment to it is blocked until the
    next save().

    Args:
        field: Name of the numeric field to increment.
        value: Amount to add (negative values decrement).

    Raises:
        RuntimeError: If the object is in the DELETED state.
        ValueError: If the field already has a direct (vanilla) edit.

    Example:
        user = db.doc('users/ada')
        user.increment('view_count', 1)
        user.increment('score', -5)  # Decrement by 5
        user.save()  # local state is already up to date
    """
    self._validate_not_deleted("increment()")

    # Mutual exclusivity: reject if the field already has a vanilla edit.
    if field in self._dirty_fields:
        raise ValueError(
            f"Cannot perform atomic increment on field '{field}' - "
            f"field has been modified directly. Save changes first or use atomic operations exclusively."
        )

    # Local simulation: a missing field starts from zero.
    self._data[field] = self._data.get(field, 0) + value

    # Queue the real server-side transform for the next save().
    from google.cloud import firestore
    self._atomic_ops[field] = firestore.Increment(value)

is_attached()

Check if object has a DocumentReference (ATTACHED or LOADED).

Source code in src/fire_prox/base_fire_object.py
276
277
278
def is_attached(self) -> bool:
    """Report whether a DocumentReference is bound (ATTACHED or LOADED)."""
    return self._state == State.ATTACHED or self._state == State.LOADED

is_deleted()

Check if object is in DELETED state.

Source code in src/fire_prox/base_fire_object.py
284
285
286
def is_deleted(self) -> bool:
    """Report whether this object's document has been deleted."""
    # Enum members are singletons, so identity comparison is equivalent.
    return self._state is State.DELETED

is_detached()

Check if object is in DETACHED state.

Source code in src/fire_prox/base_fire_object.py
272
273
274
def is_detached(self) -> bool:
    """Report whether this object has no DocumentReference yet."""
    # Enum members are singletons, so identity comparison is equivalent.
    return self._state is State.DETACHED

is_dirty()

Check if object has unsaved changes.

Source code in src/fire_prox/base_fire_object.py
288
289
290
291
292
293
294
def is_dirty(self) -> bool:
    """Report whether there are unsaved changes."""
    if self._state == State.DETACHED:
        # A DETACHED object has never been persisted, so it is always dirty.
        return True
    # Pending direct edits, field deletions, or atomic operations all count.
    return bool(self._dirty_fields or self._deleted_fields or self._atomic_ops)

is_loaded()

Check if object is in LOADED state.

Source code in src/fire_prox/base_fire_object.py
280
281
282
def is_loaded(self) -> bool:
    """Report whether document data has been loaded into memory."""
    # Enum members are singletons, so identity comparison is equivalent.
    return self._state is State.LOADED

on_snapshot(callback)

Listen for real-time updates to this document.

This method sets up a real-time listener that fires the callback whenever the document changes in Firestore. The listener runs on a separate thread managed by the Firestore SDK.

Important: This is a sync-only feature. Even for AsyncFireObject instances, the listener uses the synchronous client (via _sync_doc_ref) to run on a background thread. This is the standard Firestore pattern for real-time listeners in Python.

Args: callback: Callback function invoked on document changes. Signature: callback(doc_snapshot, changes, read_time) - doc_snapshot: List of DocumentSnapshot objects - changes: List of DocumentChange objects - read_time: Timestamp of the snapshot

Returns: Watch object with an .unsubscribe() method to stop listening.

Raises: ValueError: If called on a DETACHED object (no document path). RuntimeError: If called on a DELETED object.

Example: import threading

# Create event for synchronization
callback_done = threading.Event()

def on_change(doc_snapshot, changes, read_time):
    for doc in doc_snapshot:
        print(f"Document updated: {doc.to_dict()}")
    callback_done.set()

# Start listening
user = db.doc('users/alice')
watch = user.on_snapshot(on_change)

# Wait for initial snapshot
callback_done.wait()

# Later: stop listening
watch.unsubscribe()

Note: The callback runs on a separate thread. Use threading primitives (Event, Lock, Queue) for synchronization with your main thread.

Source code in src/fire_prox/base_fire_object.py
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
def on_snapshot(self, callback: Any) -> Any:
    """
    Listen for real-time updates to this document.

    Registers `callback` to fire whenever the document changes in
    Firestore. The Firestore SDK runs the listener on its own
    background thread.

    **Important**: this is a sync-only feature. Even AsyncFireObject
    instances listen through the synchronous client (via _sync_doc_ref),
    which is the standard Firestore pattern for real-time listeners
    in Python.

    Args:
        callback: Invoked on document changes with the signature
                 callback(doc_snapshot, changes, read_time)
                 - doc_snapshot: List of DocumentSnapshot objects
                 - changes: List of DocumentChange objects
                 - read_time: Timestamp of the snapshot

    Returns:
        Watch object with an `.unsubscribe()` method to stop listening.

    Raises:
        ValueError: If called on a DETACHED object (no document path).
        RuntimeError: If called on a DELETED object.

    Example:
        import threading

        # Create event for synchronization
        callback_done = threading.Event()

        def on_change(doc_snapshot, changes, read_time):
            for doc in doc_snapshot:
                print(f"Document updated: {doc.to_dict()}")
            callback_done.set()

        # Start listening
        user = db.doc('users/alice')
        watch = user.on_snapshot(on_change)

        # Wait for initial snapshot
        callback_done.wait()

        # Later: stop listening
        watch.unsubscribe()

    Note:
        The callback runs on a separate thread; synchronize with your
        main thread using threading primitives (Event, Lock, Queue).
    """
    self._validate_not_detached("on_snapshot()")
    self._validate_not_deleted("on_snapshot()")

    # AsyncFireObject carries a sync doc ref for listeners; plain
    # FireObject falls back to its regular doc ref. getattr's default
    # covers both "attribute missing" and "attribute is None".
    listener_ref = getattr(self, '_sync_doc_ref', None)
    if listener_ref is None:
        listener_ref = self._doc_ref

    return listener_ref.on_snapshot(callback)

to_dict()

Return shallow copy of internal data.

Returns: Dictionary containing all document fields.

Raises: RuntimeError: If object is in ATTACHED state (data not loaded).

Source code in src/fire_prox/base_fire_object.py
647
648
649
650
651
652
653
654
655
656
657
658
659
660
def to_dict(self) -> Dict[str, Any]:
    """
    Return a shallow copy of the document's fields.

    Returns:
        Dictionary containing all document fields.

    Raises:
        RuntimeError: If object is in ATTACHED state (data not loaded).
    """
    # ATTACHED means a reference exists but no data has been fetched yet.
    if self._state == State.ATTACHED:
        raise RuntimeError("Cannot call to_dict() on ATTACHED FireObject. Call fetch() first.")

    return {**self._data}

transaction()

Create a transaction for atomic read-modify-write operations.

Convenience method for creating transactions directly from a document reference, eliminating the need to access the root FireProx client.

Returns: A native google.cloud.firestore.Transaction or google.cloud.firestore.AsyncTransaction instance.

Raises: ValueError: If called on a DETACHED object (no document path yet).

Example: user = db.doc('users/alice') transaction = user.transaction()

@firestore.transactional
def update_credits(transaction):
    user.fetch(transaction=transaction)
    user.credits += 10
    user.save(transaction=transaction)

update_credits(transaction)
Source code in src/fire_prox/base_fire_object.py
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
def transaction(self) -> Any:
    """
    Create a transaction from this document's underlying client.

    Convenience wrapper so callers can start atomic read-modify-write
    sequences without holding a reference to the root FireProx instance.

    Returns:
        A native google.cloud.firestore.Transaction or
        google.cloud.firestore.AsyncTransaction instance.

    Raises:
        ValueError: If called on a DETACHED object (no document path yet).

    Example:
        user = db.doc('users/alice')
        transaction = user.transaction()

        @firestore.transactional
        def update_credits(transaction):
            user.fetch(transaction=transaction)
            user.credits += 10
            user.save(transaction=transaction)

        update_credits(transaction)
    """
    self._validate_not_detached("transaction()")

    # Reach through the document reference to its owning client.
    owning_client = self._doc_ref._client
    return owning_client.transaction()

base_fireprox

BaseFireProx: Shared logic for sync and async FireProx implementations.

This module contains the base class that implements all logic that is identical between synchronous and asynchronous FireProx implementations.

BaseFireProx

Base class for FireProx implementations (sync and async).

Contains all shared logic: - Client storage - Path validation - String representations

Subclasses must implement: - doc() - creates FireObject/AsyncFireObject - collection() - creates FireCollection/AsyncFireCollection

Source code in src/fire_prox/base_fireprox.py
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
class BaseFireProx:
    """
    Shared base for the sync and async FireProx entry points.

    Everything common to both flavors lives here:
    - holding the native client
    - path validation
    - string representations

    Subclasses must implement:
    - doc() - creates FireObject/AsyncFireObject
    - collection() - creates FireCollection/AsyncFireCollection
    """

    def __init__(self, client: Any):
        """
        Wrap a native Firestore client.

        Args:
            client: A configured google.cloud.firestore.Client or
                   google.cloud.firestore.AsyncClient instance.

        Note:
            Client type checking happens in the subclasses, which know
            which flavor to expect.
        """
        self._client = client

    # =========================================================================
    # Client Access (SHARED)
    # =========================================================================

    @property
    def native_client(self) -> Any:
        """
        The underlying google-cloud-firestore Client.

        Escape hatch for operations FireProx does not yet cover, or for
        advanced native features such as transactions, batched writes,
        and complex queries.

        Returns:
            The google.cloud.firestore.Client or AsyncClient instance.
        """
        return self._client

    @property
    def client(self) -> Any:
        """
        Alias for native_client.

        Returns:
            The google.cloud.firestore.Client or AsyncClient instance.
        """
        return self._client

    # =========================================================================
    # Transaction Support (SHARED)
    # =========================================================================

    def transaction(self) -> Any:
        """
        Create a transaction for atomic read-modify-write operations.

        The returned object is a native Firestore Transaction intended
        for use with @firestore.transactional (sync) or
        @firestore.async_transactional (async); pass it into the
        decorated function along with FireProx fetch()/save() calls.

        Returns:
            A native google.cloud.firestore.Transaction or
            google.cloud.firestore.AsyncTransaction instance.

        Example (Synchronous):
            transaction = db.transaction()

            @firestore.transactional
            def transfer_credits(transaction, from_id, to_id, amount):
                from_user = db.doc(f'users/{from_id}')
                to_user = db.doc(f'users/{to_id}')
                from_user.fetch(transaction=transaction)
                to_user.fetch(transaction=transaction)
                from_user.credits -= amount
                to_user.credits += amount
                from_user.save(transaction=transaction)
                to_user.save(transaction=transaction)

            transfer_credits(transaction, 'alice', 'bob', 100)

        Example (Asynchronous):
            transaction = db.transaction()

            @firestore.async_transactional
            async def transfer_credits(transaction, from_id, to_id, amount):
                from_user = db.doc(f'users/{from_id}')
                to_user = db.doc(f'users/{to_id}')
                await from_user.fetch(transaction=transaction)
                await to_user.fetch(transaction=transaction)
                from_user.credits -= amount
                to_user.credits += amount
                await from_user.save(transaction=transaction)
                await to_user.save(transaction=transaction)

            await transfer_credits(transaction, 'alice', 'bob', 100)
        """
        return self._client.transaction()

    def batch(self) -> Any:
        """
        Create a batch for accumulating multiple write operations.

        The returned object is a native Firestore WriteBatch/AsyncWriteBatch
        that collects set/update/delete operations and commits them
        atomically in one request.

        Compared with transactions, batches:
        - do NOT support read operations
        - do NOT require a decorator
        - do NOT automatically retry on conflicts
        - DO guarantee operation order
        - ARE more efficient for bulk writes

        Returns:
            A native google.cloud.firestore.WriteBatch or
            google.cloud.firestore.AsyncWriteBatch instance.

        Example (Synchronous):
            batch = db.batch()

            user1 = db.doc('users/alice')
            user1.credits = 100
            user1.save(batch=batch)

            user2 = db.doc('users/bob')
            user2.delete(batch=batch, recursive=False)

            batch.commit()

        Example (Asynchronous):
            batch = db.batch()

            user1 = db.doc('users/alice')
            user1.credits = 100
            await user1.save(batch=batch)

            user2 = db.doc('users/bob')
            await user2.delete(batch=batch, recursive=False)

            await batch.commit()

        Example (Bulk Operations):
            batch = db.batch()
            users = db.collection('users')
            for i in range(100):
                user = users.doc(f'user{i}')
                user.name = f'User {i}'
                user.save(batch=batch)
            batch.commit()  # all 100 documents created atomically

        Note:
            - Batches can contain up to 500 operations
            - All operations execute atomically (all-or-nothing)
            - Operations execute in the order added
            - Cannot save DETACHED documents in a batch
        """
        return self._client.batch()

    # =========================================================================
    # Utility Methods (SHARED)
    # =========================================================================

    def _validate_path(self, path: str, path_type: str) -> None:
        """
        Validate a Firestore path.

        Args:
            path: The path to validate.
            path_type: Either 'document' or 'collection' for error messages.

        Raises:
            ValueError: If the path is empty, has empty segments, or has
                       the wrong parity of segments for its type
                       (documents: even, collections: odd).
        """
        if not path:
            raise ValueError(f"Path cannot be empty for {path_type}")

        parts = path.split('/')

        # Reject paths like 'users//ada' where a segment is empty.
        if not all(parts):
            raise ValueError(f"Path cannot contain empty segments: '{path}'")

        # Firestore alternates collection/document segments, so documents
        # need an even count and collections an odd one.
        count = len(parts)
        if path_type == 'document' and count % 2 != 0:
            raise ValueError(
                f"Document path must have even number of segments, got {count}: '{path}'"
            )
        if path_type == 'collection' and count % 2 == 0:
            raise ValueError(
                f"Collection path must have odd number of segments, got {count}: '{path}'"
            )

    def _get_document_kwargs(self, path: str) -> Dict[str, Any]:
        """Subclass hook: extra keyword arguments for document wrappers."""
        return {}

    def _get_collection_kwargs(self, path: str) -> Dict[str, Any]:
        """Subclass hook: extra keyword arguments for collection wrappers."""
        return {}

    def _create_document_proxy(self, path: str, factory: Any) -> Any:
        """Validate `path` and build a document wrapper via `factory`."""
        self._validate_path(path, 'document')
        ref = self._client.document(path)
        options: Dict[str, Any] = {
            'doc_ref': ref,
            'initial_state': State.ATTACHED,
            'parent_collection': None,
        }
        options.update(self._get_document_kwargs(path))
        return factory(**options)

    def _create_collection_proxy(self, path: str, factory: Any) -> Any:
        """Validate `path` and build a collection wrapper via `factory`."""
        self._validate_path(path, 'collection')
        ref = self._client.collection(path)
        options: Dict[str, Any] = {'collection_ref': ref, 'client': self}
        options.update(self._get_collection_kwargs(path))
        return factory(**options)

    # =========================================================================
    # Special Methods (SHARED)
    # =========================================================================

    def __repr__(self) -> str:
        """
        Return a detailed string representation for debugging.

        Returns:
            String showing the project ID and database.
        """
        # Clients without a `project` attribute render as 'unknown'.
        project = getattr(self._client, 'project', 'unknown')
        return f"<{type(self).__name__} project='{project}' database='(default)'>"

    def __str__(self) -> str:
        """
        Return a human-readable string representation.

        Returns:
            String showing the project ID.
        """
        project = getattr(self._client, 'project', 'unknown')
        return f"{type(self).__name__}({project})"

client property

Alias for native_client. Get the underlying Firestore Client.

Returns: The google.cloud.firestore.Client or AsyncClient instance.

native_client property

Get the underlying google-cloud-firestore Client.

Provides an "escape hatch" for users who need to perform operations not yet supported by FireProx or who want to use advanced native features like transactions, batched writes, or complex queries.

Returns: The google.cloud.firestore.Client or AsyncClient instance.

__init__(client)

Initialize FireProx with a native Firestore client.

Args: client: A configured google.cloud.firestore.Client or google.cloud.firestore.AsyncClient instance.

Note: Type checking is handled in subclasses since they know which client type to expect.

Source code in src/fire_prox/base_fireprox.py
27
28
29
30
31
32
33
34
35
36
37
38
39
def __init__(self, client: Any):
    """
    Initialize FireProx with a native Firestore client.

    The client is stored as-is; no validation is performed here.

    Args:
        client: A configured google.cloud.firestore.Client or
               google.cloud.firestore.AsyncClient instance.

    Note:
        Type checking is handled in subclasses since they know
        which client type to expect.
    """
    # Kept private; exposed via the `client`/`native_client` properties.
    self._client = client

__repr__()

Return a detailed string representation for debugging.

Returns: String showing the project ID and database.

Source code in src/fire_prox/base_fireprox.py
275
276
277
278
279
280
281
282
283
def __repr__(self) -> str:
    """
    Return a detailed, debugging-oriented representation.

    Returns:
        String showing the project ID and database.
    """
    # Fall back to 'unknown' when the wrapped client has no project attr.
    proj = getattr(self._client, 'project', 'unknown')
    cls_name = type(self).__name__
    return "<{} project='{}' database='(default)'>".format(cls_name, proj)

__str__()

Return a human-readable string representation.

Returns: String showing the project ID.

Source code in src/fire_prox/base_fireprox.py
285
286
287
288
289
290
291
292
293
def __str__(self) -> str:
    """
    Return a concise, human-readable representation.

    Returns:
        String showing the project ID.
    """
    # Fall back to 'unknown' when the wrapped client has no project attr.
    proj = getattr(self._client, 'project', 'unknown')
    return "{}({})".format(type(self).__name__, proj)

batch()

Create a batch for accumulating multiple write operations.

Returns the native Firestore WriteBatch object that can be used to accumulate write operations (set, update, delete) and commit them atomically in a single request.

Unlike transactions, batches: - Do NOT support read operations - Do NOT require a decorator - Do NOT automatically retry on conflicts - DO guarantee operation order - ARE more efficient for bulk writes

This method provides a convenient way to create batches without manually accessing the underlying client. The returned batch object is a native Firestore WriteBatch/AsyncWriteBatch.

Returns: A native google.cloud.firestore.WriteBatch or google.cloud.firestore.AsyncWriteBatch instance.

Example (Synchronous): batch = db.batch()

# Accumulate operations
user1 = db.doc('users/alice')
user1.credits = 100
user1.save(batch=batch)

user2 = db.doc('users/bob')
user2.delete(batch=batch, recursive=False)

# Commit all operations atomically
batch.commit()

Example (Asynchronous): batch = db.batch()

# Accumulate operations
user1 = db.doc('users/alice')
user1.credits = 100
await user1.save(batch=batch)

user2 = db.doc('users/bob')
await user2.delete(batch=batch, recursive=False)

# Commit all operations atomically
await batch.commit()

Example (Bulk Operations): batch = db.batch() users = db.collection('users')

# Create multiple documents in one batch
for i in range(100):
    user = users.doc(f'user{i}')
    user.name = f'User {i}'
    user.save(batch=batch)

# All 100 documents created atomically
batch.commit()

Note: - Batches can contain up to 500 operations - All operations execute atomically (all-or-nothing) - Operations execute in the order added - Cannot save DETACHED documents in a batch

Source code in src/fire_prox/base_fireprox.py
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
def batch(self) -> Any:
    """
    Create a batch for accumulating multiple write operations.

    Returns the native Firestore WriteBatch object, which accumulates
    write operations (set, update, delete) and commits them atomically
    in a single request. This is a convenience wrapper so callers do
    not have to reach into the underlying client themselves.

    Unlike transactions, batches:
    - Do NOT support read operations
    - Do NOT require a decorator
    - Do NOT automatically retry on conflicts
    - DO guarantee operation order
    - ARE more efficient for bulk writes

    Returns:
        A native google.cloud.firestore.WriteBatch or
        google.cloud.firestore.AsyncWriteBatch instance.

    Example (Synchronous):
        batch = db.batch()

        user1 = db.doc('users/alice')
        user1.credits = 100
        user1.save(batch=batch)

        user2 = db.doc('users/bob')
        user2.delete(batch=batch, recursive=False)

        # Commit all accumulated operations atomically
        batch.commit()

    Example (Asynchronous):
        batch = db.batch()

        user1 = db.doc('users/alice')
        user1.credits = 100
        await user1.save(batch=batch)

        user2 = db.doc('users/bob')
        await user2.delete(batch=batch, recursive=False)

        await batch.commit()

    Example (Bulk Operations):
        batch = db.batch()
        users = db.collection('users')

        for i in range(100):
            user = users.doc(f'user{i}')
            user.name = f'User {i}'
            user.save(batch=batch)

        # All 100 documents created atomically
        batch.commit()

    Note:
        - Batches can contain up to 500 operations
        - All operations execute atomically (all-or-nothing)
        - Operations execute in the order added
        - Cannot save DETACHED documents in a batch
    """
    # Pure delegation: the native client constructs the batch object.
    native_client = self._client
    return native_client.batch()

transaction()

Create a transaction for atomic read-modify-write operations.

Returns the native Firestore transaction object that can be used with the @firestore.transactional decorator for synchronous operations or @firestore.async_transactional for asynchronous operations.

This method provides a convenient way to create transactions without manually accessing the underlying client. The returned transaction object is a native Firestore Transaction that should be passed to functions decorated with @firestore.transactional.

Returns: A native google.cloud.firestore.Transaction or google.cloud.firestore.AsyncTransaction instance.

Example (Synchronous): transaction = db.transaction()

@firestore.transactional
def transfer_credits(transaction, from_id, to_id, amount):
    from_user = db.doc(f'users/{from_id}')
    to_user = db.doc(f'users/{to_id}')

    from_user.fetch(transaction=transaction)
    to_user.fetch(transaction=transaction)

    from_user.credits -= amount
    to_user.credits += amount

    from_user.save(transaction=transaction)
    to_user.save(transaction=transaction)

transfer_credits(transaction, 'alice', 'bob', 100)

Example (Asynchronous): transaction = db.transaction()

@firestore.async_transactional
async def transfer_credits(transaction, from_id, to_id, amount):
    from_user = db.doc(f'users/{from_id}')
    to_user = db.doc(f'users/{to_id}')

    await from_user.fetch(transaction=transaction)
    await to_user.fetch(transaction=transaction)

    from_user.credits -= amount
    to_user.credits += amount

    await from_user.save(transaction=transaction)
    await to_user.save(transaction=transaction)

await transfer_credits(transaction, 'alice', 'bob', 100)
Source code in src/fire_prox/base_fireprox.py
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
def transaction(self) -> Any:
    """
    Create a transaction for atomic read-modify-write operations.

    Returns the native Firestore transaction object. Pass it to a
    function decorated with @firestore.transactional (synchronous) or
    @firestore.async_transactional (asynchronous). This is a convenience
    wrapper so callers do not have to reach into the underlying client.

    Returns:
        A native google.cloud.firestore.Transaction or
        google.cloud.firestore.AsyncTransaction instance.

    Example (Synchronous):
        transaction = db.transaction()

        @firestore.transactional
        def transfer_credits(transaction, from_id, to_id, amount):
            from_user = db.doc(f'users/{from_id}')
            to_user = db.doc(f'users/{to_id}')

            from_user.fetch(transaction=transaction)
            to_user.fetch(transaction=transaction)

            from_user.credits -= amount
            to_user.credits += amount

            from_user.save(transaction=transaction)
            to_user.save(transaction=transaction)

        transfer_credits(transaction, 'alice', 'bob', 100)

    Example (Asynchronous):
        transaction = db.transaction()

        @firestore.async_transactional
        async def transfer_credits(transaction, from_id, to_id, amount):
            from_user = db.doc(f'users/{from_id}')
            to_user = db.doc(f'users/{to_id}')

            await from_user.fetch(transaction=transaction)
            await to_user.fetch(transaction=transaction)

            from_user.credits -= amount
            to_user.credits += amount

            await from_user.save(transaction=transaction)
            await to_user.save(transaction=transaction)

        await transfer_credits(transaction, 'alice', 'bob', 100)
    """
    # Pure delegation: the native client constructs the transaction object.
    native_client = self._client
    return native_client.transaction()

fire_collection

FireCollection: Interface for working with Firestore collections (synchronous).

This module provides the synchronous FireCollection class, which represents a Firestore collection and provides methods for creating new documents and querying existing ones.

FireCollection

Bases: BaseFireCollection

A wrapper around Firestore CollectionReference for document management (synchronous).

FireCollection provides a simplified interface for creating new documents and querying collections. It serves as a factory for FireObject instances and provides a lightweight query builder (where, order_by, limit, select).

This is the synchronous implementation.

Usage Examples: # Get a collection users = db.collection('users')

# Create a new document in DETACHED state
new_user = users.new()
new_user.name = 'Ada Lovelace'
new_user.year = 1815
new_user.save()

# Create with explicit ID
user = users.new()
user.name = 'Charles Babbage'
user.save(doc_id='cbabbage')

# Phase 2: Query the collection
query = users.where('year', '>', 1800).limit(10)
for user in query.get():
    print(user.name)
Source code in src/fire_prox/fire_collection.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
class FireCollection(BaseFireCollection):
    """
    A wrapper around Firestore CollectionReference for document management (synchronous).

    FireCollection provides a simplified interface for creating new documents
    and querying collections. It serves as a factory for FireObject instances
    and provides a lightweight query builder (where/order_by/limit/select)
    for common filtering needs.

    This is the synchronous implementation.

    Usage Examples:
        # Get a collection
        users = db.collection('users')

        # Create a new document in DETACHED state
        new_user = users.new()
        new_user.name = 'Ada Lovelace'
        new_user.year = 1815
        new_user.save()

        # Create with explicit ID
        user = users.new()
        user.name = 'Charles Babbage'
        user.save(doc_id='cbabbage')

        # Query the collection
        query = users.where('year', '>', 1800).limit(10)
        for user in query.get():
            print(user.name)
    """

    # =========================================================================
    # Document Creation
    # =========================================================================

    def _instantiate_object(
        self,
        *,
        doc_ref: Any,
        initial_state: State,
        parent_collection: 'FireCollection',
        **_: Any,
    ) -> FireObject:
        """Instantiate the synchronous FireObject wrapper.

        Extra keyword arguments from the base class are accepted and
        ignored (**_) so the shared factory protocol stays flexible.
        """
        return FireObject(
            doc_ref=doc_ref,
            initial_state=initial_state,
            parent_collection=parent_collection,
        )

    def new(self) -> FireObject:
        """Create a new FireObject in DETACHED state."""
        # Delegates to the base class; overridden only to narrow the return type.
        return super().new()

    def doc(self, doc_id: str) -> FireObject:
        """Get a reference to a specific document in this collection."""
        # Delegates to the base class; overridden only to narrow the return type.
        return super().doc(doc_id)

    # =========================================================================
    # Parent Property (Phase 2)
    # =========================================================================

    @property
    def parent(self) -> Optional[FireObject]:
        """
        Get the parent document if this is a subcollection.

        Returns:
            FireObject representing the parent document if this is a
            subcollection, None if this is a root-level collection.

        Raises:
            NotImplementedError: Always, in the current release --
                subcollection parent traversal is a Phase 2 feature
                that has not been implemented yet.

        Example:
            posts = db.doc('users/alovelace').collection('posts')
            parent = posts.parent
            print(parent.path)  # 'users/alovelace'
        """
        raise NotImplementedError("Phase 2 feature - subcollections")

    # =========================================================================
    # Query Methods (Phase 2)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'FireQuery':
        """
        Create a query with a filter condition.

        Phase 2.5 feature. Builds a lightweight query for common filtering
        needs. For complex queries, users should use the native API and
        hydrate results with FireObject.from_snapshot().

        Args:
            field: The field path to filter on (e.g., 'name', 'address.city').
            op: Comparison operator: '==', '!=', '<', '<=', '>', '>=',
                'in', 'not-in', 'array-contains', 'array-contains-any'.
            value: The value to compare against.

        Returns:
            A FireQuery instance for method chaining.

        Example:
            query = users.where('birth_year', '>', 1800)
                        .where('country', '==', 'UK')
                        .limit(10)
            for user in query.get():
                print(user.name)
        """
        # Imported lazily to avoid a circular import with fire_query.
        from google.cloud.firestore_v1.base_query import FieldFilter

        from .fire_query import FireQuery

        # Create initial query with filter
        filter_obj = FieldFilter(field, op, value)
        native_query = self._collection_ref.where(filter=filter_obj)
        return FireQuery(native_query, parent_collection=self)

    def order_by(
        self,
        field: str,
        direction: str = 'ASCENDING'
    ) -> 'FireQuery':
        """
        Create a query with ordering.

        Phase 2.5 feature. Orders results by a field.

        Args:
            field: The field path to order by.
            direction: 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.

        Returns:
            A FireQuery instance for method chaining.

        Raises:
            ValueError: If direction is not 'ASCENDING' or 'DESCENDING'
                (case-insensitive).
        """
        from google.cloud.firestore_v1 import Query as QueryClass

        from .fire_query import FireQuery

        # Convert direction string to constant
        if direction.upper() == 'ASCENDING':
            direction_const = QueryClass.ASCENDING
        elif direction.upper() == 'DESCENDING':
            direction_const = QueryClass.DESCENDING
        else:
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        # Create query with ordering
        native_query = self._collection_ref.order_by(field, direction=direction_const)
        return FireQuery(native_query, parent_collection=self)

    def limit(self, count: int) -> 'FireQuery':
        """
        Create a query with a result limit.

        Phase 2.5 feature. Limits the number of results returned.

        Args:
            count: Maximum number of results to return. Must be positive.

        Returns:
            A FireQuery instance for method chaining.

        Raises:
            ValueError: If count is zero or negative.
        """
        from .fire_query import FireQuery

        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")

        # Create query with limit
        native_query = self._collection_ref.limit(count)
        return FireQuery(native_query, parent_collection=self)

    def select(self, *field_paths: str) -> 'FireQuery':
        """
        Create a query with field projection.

        Phase 4 Part 3 feature. Selects specific fields to return in query results.
        Returns vanilla dictionaries instead of FireObject instances.

        Args:
            *field_paths: One or more field paths to select.

        Returns:
            A FireQuery instance with projection applied.

        Raises:
            ValueError: If no field paths are given.

        Example:
            # Select specific fields
            results = users.select('name', 'email').get()
            # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
        """
        from .fire_query import FireQuery

        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # Create query with projection; the projection tuple is also passed
        # along so FireQuery knows to yield plain dicts.
        native_query = self._collection_ref.select(list(field_paths))
        return FireQuery(native_query, parent_collection=self, projection=field_paths)

    def get_all(self) -> Iterator[FireObject]:
        """
        Retrieve all documents in the collection.

        Phase 2.5 feature. Returns an iterator of all documents.

        Yields:
            FireObject instances in LOADED state for each document.

        Example:
            for user in users.get_all():
                print(f"{user.name}: {user.year}")
        """
        # Stream all documents from the collection
        for snapshot in self._collection_ref.stream():
            yield FireObject.from_snapshot(snapshot, parent_collection=self)

    # =========================================================================
    # Vector Query Methods
    # =========================================================================

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'FireQuery':
        """
        Find the nearest neighbors based on vector similarity.

        Performs a vector similarity search to find documents with embeddings
        nearest to the query vector. Requires a single-field vector index on
        the vector_field.

        Args:
            vector_field: Name of the field containing vector embeddings.
            query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
            distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
                DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
            limit: Maximum number of nearest neighbors to return (max 1000).
            distance_result_field: Optional field name to store the calculated distance
                in the query results.

        Returns:
            A FireQuery instance for method chaining and execution.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            collection = db.collection("documents")
            query = collection.find_nearest(
                vector_field="embedding",
                query_vector=Vector([0.1, 0.2, 0.3]),
                distance_measure=DistanceMeasure.EUCLIDEAN,
                limit=5
            )
            for doc in query.get():
                print(f"{doc.title}: {doc.embedding}")

        Note:
            - Requires a vector index on the vector_field
            - Maximum limit is 1000 documents
            - Can be combined with where() for pre-filtering (requires composite index)
            - Does not work with Firestore emulator (production only)
        """
        from .fire_query import FireQuery

        # Create vector query using native find_nearest
        native_query = self._collection_ref.find_nearest(
            vector_field=vector_field,
            query_vector=query_vector,
            distance_measure=distance_measure,
            limit=limit,
            distance_result_field=distance_result_field,
        )
        return FireQuery(native_query, parent_collection=self)

    # =========================================================================
    # Aggregation Methods (Phase 4 Part 5)
    # =========================================================================

    def count(self) -> int:
        """
        Count documents in the collection.

        Phase 4 Part 5 feature. Returns the total count of documents
        without fetching their data.

        Returns:
            The number of documents in the collection.

        Example:
            total = users.count()
            print(f"Total users: {total}")
        """
        from .fire_query import FireQuery
        # Use collection reference directly as a query for aggregation
        query = FireQuery(self._collection_ref, parent_collection=self)
        return query.count()

    def sum(self, field: str):
        """
        Sum a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the sum of a numeric field
        without fetching document data.

        Args:
            field: The field name to sum.

        Returns:
            The sum of the field values (int or float).

        Example:
            total_revenue = orders.sum('amount')
        """
        from .fire_query import FireQuery
        # Use collection reference directly as a query for aggregation
        query = FireQuery(self._collection_ref, parent_collection=self)
        return query.sum(field)

    def avg(self, field: str) -> float:
        """
        Average a numeric field across all documents.

        Phase 4 Part 5 feature. Calculates the average of a numeric field
        without fetching document data.

        Args:
            field: The field name to average.

        Returns:
            The average of the field values (float).

        Example:
            avg_rating = products.avg('rating')
        """
        from .fire_query import FireQuery
        # Use collection reference directly as a query for aggregation
        query = FireQuery(self._collection_ref, parent_collection=self)
        return query.avg(field)

    def aggregate(self, **aggregations):
        """
        Execute multiple aggregations in a single query.

        Phase 4 Part 5 feature. Performs multiple aggregation operations
        (count, sum, avg) in one efficient query.

        Args:
            **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

        Returns:
            Dictionary mapping aggregation names to their results.

        Example:
            from fire_prox import Count, Sum, Avg

            stats = users.aggregate(
                total=Count(),
                total_score=Sum('score'),
                avg_age=Avg('age')
            )
            # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
        """
        from .fire_query import FireQuery
        # Use collection reference directly as a query for aggregation
        query = FireQuery(self._collection_ref, parent_collection=self)
        return query.aggregate(**aggregations)

    # =========================================================================
    # Collection Deletion
    # =========================================================================

    def delete_all(
        self,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete every document in this collection.

        Firestore offers no atomic "drop collection" operation. This helper
        iterates through each document and issues batched deletes. When
        recursive is True (default) it will also clear any nested subcollections
        before deleting their parent document.

        Args:
            batch_size: Maximum number of deletes to commit at once.
            recursive: Whether to delete nested subcollections.
            dry_run: Count what would be removed without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections
            visited during recursion.

        Raises:
            ValueError: If batch_size is not positive.
        """
        self._validate_batch_size(batch_size)

        # include_self=False: this collection itself is not counted in the
        # 'collections' statistic, only subcollections found during recursion.
        return self._delete_collection_recursive(
            collection_ref=self._collection_ref,
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
            include_self=False,
        )

    def _delete_collection_recursive(
        self,
        *,
        collection_ref: Any,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
        include_self: bool,
    ) -> Dict[str, int]:
        """Internal helper to delete documents within a collection reference.

        Walks the collection via list_documents (which also surfaces
        "missing" parent documents that only exist as subcollection
        anchors), deleting in batches of at most batch_size writes.
        In dry_run mode no batch is created and no writes are issued --
        only the statistics are accumulated.
        """
        # NOTE(review): reaches into the native reference's private _client
        # attribute; there appears to be no public accessor for it here.
        client = collection_ref._client
        stats = {'documents': 0, 'collections': 1 if include_self else 0}
        batch = None if dry_run else client.batch()
        ops_in_batch = 0

        for doc_ref in collection_ref.list_documents(page_size=batch_size):
            # Clear subcollections first so children never outlive parents.
            if recursive:
                sub_stats = self._delete_document_subcollections(
                    doc_ref,
                    batch_size=batch_size,
                    recursive=recursive,
                    dry_run=dry_run,
                )
                stats['documents'] += sub_stats['documents']
                stats['collections'] += sub_stats['collections']

            if not dry_run and batch is not None:
                batch.delete(doc_ref)
                ops_in_batch += 1

            stats['documents'] += 1

            # Commit once the batch is full and start a fresh one; Firestore
            # batches are capped, so deletes are flushed in batch_size chunks.
            if not dry_run and batch is not None and ops_in_batch >= batch_size:
                batch.commit()
                batch = client.batch()
                ops_in_batch = 0

        # Flush any remaining queued deletes.
        if not dry_run and batch is not None and ops_in_batch:
            batch.commit()

        return stats

    def _delete_document_subcollections(
        self,
        doc_ref: Any,
        *,
        batch_size: int,
        recursive: bool,
        dry_run: bool,
    ) -> Dict[str, int]:
        """Delete all subcollections hanging off a document reference.

        Each subcollection is cleared with _delete_collection_recursive
        (include_self=True so it is counted in the 'collections' total),
        and the per-subcollection statistics are summed into one result.
        """
        stats = {'documents': 0, 'collections': 0}

        for subcollection_ref in doc_ref.collections():
            sub_stats = self._delete_collection_recursive(
                collection_ref=subcollection_ref,
                batch_size=batch_size,
                recursive=recursive,
                dry_run=dry_run,
                include_self=True,
            )
            stats['documents'] += sub_stats['documents']
            stats['collections'] += sub_stats['collections']

        return stats

parent property

Get the parent document if this is a subcollection.

Returns: FireObject representing the parent document if this is a subcollection, None if this is a root-level collection.

Note: Phase 2 feature. Currently raises NotImplementedError, as subcollection parent traversal is not yet implemented.

Example: posts = db.doc('users/alovelace').collection('posts') parent = posts.parent print(parent.path) # 'users/alovelace'

aggregate(**aggregations)

Execute multiple aggregations in a single query.

Phase 4 Part 5 feature. Performs multiple aggregation operations (count, sum, avg) in one efficient query.

Args: **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

Returns: Dictionary mapping aggregation names to their results.

Example: from fire_prox import Count, Sum, Avg

stats = users.aggregate(
    total=Count(),
    total_score=Sum('score'),
    avg_age=Avg('age')
)
# Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
Source code in src/fire_prox/fire_collection.py
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
def aggregate(self, **aggregations):
    """
    Execute multiple aggregations in a single query.

    Phase 4 Part 5 feature. Performs multiple aggregation operations
    (count, sum, avg) in one efficient query.

    Args:
        **aggregations: Named aggregation operations using Count(), Sum(), or Avg().

    Returns:
        Dictionary mapping aggregation names to their results.

    Example:
        from fire_prox import Count, Sum, Avg

        stats = users.aggregate(
            total=Count(),
            total_score=Sum('score'),
            avg_age=Avg('age')
        )
        # Returns: {'total': 42, 'total_score': 5000, 'avg_age': 28.5}
    """
    from .fire_query import FireQuery
    # Use collection reference directly as a query for aggregation
    query = FireQuery(self._collection_ref, parent_collection=self)
    return query.aggregate(**aggregations)

avg(field)

Average a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the average of a numeric field without fetching document data.

Args: field: The field name to average.

Returns: The average of the field values (float).

Example: avg_rating = products.avg('rating')

Source code in src/fire_prox/fire_collection.py
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
def avg(self, field: str) -> float:
    """
    Compute the mean of a numeric field over the whole collection.

    Phase 4 Part 5 feature. Runs a server-side aggregation, so no
    document payloads are transferred.

    Args:
        field: Name of the numeric field to average.

    Returns:
        float: The average of the field's values.

    Example:
        avg_rating = products.avg('rating')
    """
    from .fire_query import FireQuery

    # Wrap the raw collection reference as an unfiltered query and let
    # FireQuery issue the aggregation.
    wrapper = FireQuery(self._collection_ref, parent_collection=self)
    return wrapper.avg(field)

count()

Count documents in the collection.

Phase 4 Part 5 feature. Returns the total count of documents without fetching their data.

Returns: The number of documents in the collection.

Example: total = users.count(); print(f"Total users: {total}")

Source code in src/fire_prox/fire_collection.py
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
def count(self) -> int:
    """
    Return how many documents the collection contains.

    Phase 4 Part 5 feature. Uses a server-side count aggregation, so
    document data is never downloaded.

    Returns:
        int: Number of documents in the collection.

    Example:
        total = users.count()
        print(f"Total users: {total}")
    """
    from .fire_query import FireQuery

    # The collection reference itself acts as an unfiltered query.
    wrapper = FireQuery(self._collection_ref, parent_collection=self)
    return wrapper.count()

delete_all(*, batch_size=50, recursive=True, dry_run=False)

Delete every document in this collection.

Firestore offers no atomic "drop collection" operation. This helper iterates through each document and issues batched deletes. When recursive is True (default) it will also clear any nested subcollections before deleting their parent document.

Args: batch_size: Maximum number of deletes to commit at once. recursive: Whether to delete nested subcollections. dry_run: Count what would be removed without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections visited during recursion.

Raises: ValueError: If batch_size is not positive.

Source code in src/fire_prox/fire_collection.py
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
def delete_all(
    self,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Remove every document in this collection via batched deletes.

    Firestore has no atomic "drop collection" primitive, so this helper
    walks the collection and deletes documents in commits of at most
    batch_size. With recursive=True (the default), nested subcollections
    are emptied before their parent documents are removed.

    Args:
        batch_size: Upper bound on deletes per commit.
        recursive: Also clear nested subcollections when True.
        dry_run: Only count what would be deleted; perform no writes.

    Returns:
        Dictionary with counts of deleted documents and subcollections
        visited during recursion.

    Raises:
        ValueError: If batch_size is not positive.
    """
    # Fail fast on an invalid batch size before any Firestore traffic.
    self._validate_batch_size(batch_size)

    # include_self=False: the collection itself has no parent document
    # that needs deleting.
    return self._delete_collection_recursive(
        include_self=False,
        collection_ref=self._collection_ref,
        recursive=recursive,
        dry_run=dry_run,
        batch_size=batch_size,
    )

doc(doc_id)

Get a reference to a specific document in this collection.

Source code in src/fire_prox/fire_collection.py
73
74
75
def doc(self, doc_id: str) -> FireObject:
    """Return a FireObject proxy for the document with the given ID."""
    return super().doc(doc_id)

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search to find documents with embeddings nearest to the query vector. Requires a single-field vector index on the vector_field.

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: A FireQuery instance for method chaining and execution.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

collection = db.collection("documents")
query = collection.find_nearest(
    vector_field="embedding",
    query_vector=Vector([0.1, 0.2, 0.3]),
    distance_measure=DistanceMeasure.EUCLIDEAN,
    limit=5
)
for doc in query.get():
    print(f"{doc.title}: {doc.embedding}")

Note: - Requires a vector index on the vector_field - Maximum limit is 1000 documents - Can be combined with where() for pre-filtering (requires composite index) - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/fire_collection.py
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'FireQuery':
    """
    Run a vector similarity (nearest-neighbor) search.

    Finds the documents whose embeddings are closest to query_vector.
    A single-field vector index on vector_field must exist.

    Args:
        vector_field: Field holding the vector embeddings.
        query_vector: Vector to compare against
            (google.cloud.firestore_v1.vector.Vector).
        distance_measure: One of DistanceMeasure.EUCLIDEAN,
            DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT.
        limit: Maximum neighbors to return (Firestore caps this at 1000).
        distance_result_field: If given, each result stores its computed
            distance under this field name.

    Returns:
        A FireQuery instance for chaining and execution.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        collection = db.collection("documents")
        query = collection.find_nearest(
            vector_field="embedding",
            query_vector=Vector([0.1, 0.2, 0.3]),
            distance_measure=DistanceMeasure.EUCLIDEAN,
            limit=5
        )
        for doc in query.get():
            print(f"{doc.title}: {doc.embedding}")

    Note:
        - A vector index on vector_field is required.
        - Maximum limit is 1000 documents.
        - Combine with where() for pre-filtering (needs a composite index).
        - Production only; the Firestore emulator does not support it.
    """
    from .fire_query import FireQuery

    # Delegate to the native client's vector search, then wrap the
    # resulting query so results hydrate into FireObjects.
    vector_query = self._collection_ref.find_nearest(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return FireQuery(vector_query, parent_collection=self)

get_all()

Retrieve all documents in the collection.

Phase 2.5 feature. Returns an iterator of all documents.

Yields: FireObject instances in LOADED state for each document.

Example: for user in users.get_all(): print(f"{user.name}: {user.year}")

Source code in src/fire_prox/fire_collection.py
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
def get_all(self) -> Iterator[FireObject]:
    """
    Iterate over every document in the collection.

    Phase 2.5 feature. Streams snapshots lazily from Firestore and
    hydrates each one into a proxy object as it arrives.

    Yields:
        FireObject instances in LOADED state, one per document.

    Example:
        for user in users.get_all():
            print(f"{user.name}: {user.year}")
    """
    # stream() pages through the collection lazily; hydrate as we go so
    # callers can stop early without fetching everything.
    for snap in self._collection_ref.stream():
        yield FireObject.from_snapshot(snap, parent_collection=self)

limit(count)

Create a query with a result limit.

Phase 2.5 feature. Limits the number of results returned.

Args: count: Maximum number of results to return.

Returns: A FireQuery instance for method chaining.

Source code in src/fire_prox/fire_collection.py
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
def limit(self, count: int) -> 'FireQuery':
    """
    Restrict the number of results a query returns.

    Phase 2.5 feature.

    Args:
        count: Maximum number of documents to return; must be positive.

    Returns:
        A FireQuery instance for method chaining.

    Raises:
        ValueError: If count is zero or negative.
    """
    from .fire_query import FireQuery

    if count <= 0:
        raise ValueError(f"Limit count must be positive, got {count}")

    limited = self._collection_ref.limit(count)
    return FireQuery(limited, parent_collection=self)

new()

Create a new FireObject in DETACHED state.

Source code in src/fire_prox/fire_collection.py
69
70
71
def new(self) -> FireObject:
    """Return a fresh FireObject in DETACHED state for this collection."""
    return super().new()

order_by(field, direction='ASCENDING')

Create a query with ordering.

Phase 2.5 feature. Orders results by a field.

Args: field: The field path to order by. direction: 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.

Returns: A FireQuery instance for method chaining.

Source code in src/fire_prox/fire_collection.py
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
def order_by(
    self,
    field: str,
    direction: str = 'ASCENDING'
) -> 'FireQuery':
    """
    Sort query results by a field.

    Phase 2.5 feature.

    Args:
        field: Field path to sort on.
        direction: Sort order, 'ASCENDING' or 'DESCENDING'
            (case-insensitive). Defaults to 'ASCENDING'.

    Returns:
        A FireQuery instance for method chaining.

    Raises:
        ValueError: If direction names an unknown sort order.
    """
    from google.cloud.firestore_v1 import Query as QueryClass

    from .fire_query import FireQuery

    # Translate the user-facing string into the native query constant.
    directions = {
        'ASCENDING': QueryClass.ASCENDING,
        'DESCENDING': QueryClass.DESCENDING,
    }
    key = direction.upper()
    if key not in directions:
        raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

    ordered = self._collection_ref.order_by(field, direction=directions[key])
    return FireQuery(ordered, parent_collection=self)

select(*field_paths)

Create a query with field projection.

Phase 4 Part 3 feature. Selects specific fields to return in query results. Returns vanilla dictionaries instead of FireObject instances.

Args: *field_paths: One or more field paths to select.

Returns: A FireQuery instance with projection applied.

Example: # Select specific fields results = users.select('name', 'email').get() # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]

Source code in src/fire_prox/fire_collection.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
def select(self, *field_paths: str) -> 'FireQuery':
    """
    Project query results down to a subset of fields.

    Phase 4 Part 3 feature. Queries built this way yield plain
    dictionaries rather than FireObject instances.

    Args:
        *field_paths: One or more field paths to include.

    Returns:
        A FireQuery instance with the projection applied.

    Raises:
        ValueError: If no field paths are supplied.

    Example:
        # Select specific fields
        results = users.select('name', 'email').get()
        # Returns: [{'name': 'Alice', 'email': 'alice@example.com'}, ...]
    """
    from .fire_query import FireQuery

    if not field_paths:
        raise ValueError("select() requires at least one field path")

    # The projection tuple is also handed to FireQuery so results can be
    # rendered as plain dicts.
    projected = self._collection_ref.select(list(field_paths))
    return FireQuery(projected, parent_collection=self, projection=field_paths)

sum(field)

Sum a numeric field across all documents.

Phase 4 Part 5 feature. Calculates the sum of a numeric field without fetching document data.

Args: field: The field name to sum.

Returns: The sum of the field values (int or float).

Example: total_revenue = orders.sum('amount')

Source code in src/fire_prox/fire_collection.py
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
def sum(self, field: str):
    """
    Total a numeric field over the whole collection.

    Phase 4 Part 5 feature. Runs a server-side aggregation, so no
    document payloads are transferred.

    Args:
        field: Name of the numeric field to total.

    Returns:
        The sum of the field's values (int or float).

    Example:
        total_revenue = orders.sum('amount')
    """
    from .fire_query import FireQuery

    # Wrap the raw collection reference as an unfiltered query and let
    # FireQuery issue the aggregation.
    wrapper = FireQuery(self._collection_ref, parent_collection=self)
    return wrapper.sum(field)

where(field, op, value)

Create a query with a filter condition.

Phase 2.5 feature. Builds a lightweight query for common filtering needs. For complex queries, users should use the native API and hydrate results with FireObject.from_snapshot().

Args: field: The field path to filter on (e.g., 'name', 'address.city'). op: Comparison operator: '==', '!=', '<', '<=', '>', '>=', 'in', 'not-in', 'array-contains', 'array-contains-any'. value: The value to compare against.

Returns: A FireQuery instance for method chaining.

Example: query = users.where('birth_year', '>', 1800).where('country', '==', 'UK').limit(10); then iterate the results with: for user in query.get(): print(user.name)

Source code in src/fire_prox/fire_collection.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
def where(self, field: str, op: str, value: Any) -> 'FireQuery':
    """
    Start a query with a single filter condition.

    Phase 2.5 feature. Covers common filtering needs with a lightweight
    builder; for anything more elaborate, drop down to the native API
    and hydrate results via FireObject.from_snapshot().

    Args:
        field: Field path to filter on (e.g., 'name', 'address.city').
        op: Comparison operator: '==', '!=', '<', '<=', '>', '>=',
            'in', 'not-in', 'array-contains', 'array-contains-any'.
        value: Value to compare against.

    Returns:
        A FireQuery instance for method chaining.

    Example:
        query = users.where('birth_year', '>', 1800)
                    .where('country', '==', 'UK')
                    .limit(10)
        for user in query.get():
            print(user.name)
    """
    from google.cloud.firestore_v1.base_query import FieldFilter

    from .fire_query import FireQuery

    # Build the native filtered query, then wrap it for chaining.
    filtered = self._collection_ref.where(filter=FieldFilter(field, op, value))
    return FireQuery(filtered, parent_collection=self)

fire_object

FireObject: The core proxy class for Firestore documents (synchronous).

This module implements the synchronous FireObject class, which serves as a schemaless, state-aware proxy for Firestore documents.

FireObject

Bases: BaseFireObject

A schemaless, state-aware proxy for a Firestore document (synchronous).

FireObject provides an object-oriented interface to Firestore documents, allowing attribute-style access to document fields and automatic state management throughout the document's lifecycle.

The object maintains an internal state machine (DETACHED -> ATTACHED -> LOADED -> DELETED) and tracks modifications to enable efficient partial updates.

This is the synchronous implementation that supports lazy loading via automatic fetch on attribute access.

Usage Examples: # Create a new document (DETACHED state) user = collection.new() user.name = 'Ada Lovelace' user.year = 1815 user.save() # Transitions to LOADED

# Load existing document (ATTACHED -> LOADED on access)
user = db.doc('users/alovelace')  # ATTACHED state
print(user.name)  # Triggers fetch, transitions to LOADED

# Update and save
user.year = 1816  # Marks as dirty
user.save()  # Performs update

# Delete
user.delete()  # Transitions to DELETED
Source code in src/fire_prox/fire_object.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
class FireObject(BaseFireObject):
    """
    A schemaless, state-aware proxy for a Firestore document (synchronous).

    FireObject provides an object-oriented interface to Firestore documents,
    allowing attribute-style access to document fields and automatic state
    management throughout the document's lifecycle.

    The object maintains an internal state machine (DETACHED -> ATTACHED ->
    LOADED -> DELETED) and tracks modifications to enable efficient partial
    updates.

    This is the synchronous implementation that supports lazy loading via
    automatic fetch on attribute access.

    Usage Examples:
        # Create a new document (DETACHED state)
        user = collection.new()
        user.name = 'Ada Lovelace'
        user.year = 1815
        user.save()  # Transitions to LOADED

        # Load existing document (ATTACHED -> LOADED on access)
        user = db.doc('users/alovelace')  # ATTACHED state
        print(user.name)  # Triggers fetch, transitions to LOADED

        # Update and save
        user.year = 1816  # Marks as dirty
        user.save()  # Performs update

        # Delete
        user.delete()  # Transitions to DELETED
    """

    # =========================================================================
    # Firestore I/O Hooks
    # =========================================================================

    def _get_snapshot(self, transaction: Optional[Any] = None) -> DocumentSnapshot:
        """Retrieve a document snapshot using the synchronous client."""
        if transaction is not None:
            return self._doc_ref.get(transaction=transaction)
        return self._doc_ref.get()

    def _create_document(self, doc_id: Optional[str] = None) -> DocumentReference:
        """Create a new synchronous document reference for DETACHED saves."""
        if not self._parent_collection:
            raise ValueError("DETACHED object has no parent collection")

        collection_ref = self._parent_collection._collection_ref
        if doc_id:
            doc_ref = collection_ref.document(doc_id)
        else:
            # No ID supplied: let Firestore auto-generate one.
            doc_ref = collection_ref.document()

        # object.__setattr__ bypasses the proxy's attribute handling so the
        # reference is stored as internal state, not as a document field.
        object.__setattr__(self, '_doc_ref', doc_ref)
        return doc_ref

    def _write_set(
        self,
        data: Dict[str, Any],
        doc_ref: Optional[DocumentReference] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Persist data via a set call on the synchronous client."""
        # Allow callers (e.g. DETACHED saves) to target a freshly created ref.
        target_ref = doc_ref or self._doc_ref

        if transaction is not None:
            transaction.set(target_ref, data)
        elif batch is not None:
            batch.set(target_ref, data)
        else:
            target_ref.set(data)

    def _write_update(
        self,
        update_dict: Dict[str, Any],
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> None:
        """Perform an update operation using the synchronous client."""
        if transaction is not None:
            transaction.update(self._doc_ref, update_dict)
        elif batch is not None:
            batch.update(self._doc_ref, update_dict)
        else:
            self._doc_ref.update(update_dict)

    def _write_delete(self, batch: Optional[Any] = None) -> None:
        """Delete the document using the synchronous client."""
        if batch is not None:
            batch.delete(self._doc_ref)
        else:
            self._doc_ref.delete()

    # =========================================================================
    # Dynamic Attribute Handling (Sync-specific for lazy loading)
    # =========================================================================

    def __getattr__(self, name: str) -> Any:
        """
        Handle attribute access for document fields with lazy loading.

        This method implements lazy loading: if the object is in ATTACHED state,
        accessing any data attribute will automatically trigger a fetch() to load
        the data from Firestore.

        Args:
            name: The attribute name being accessed.

        Returns:
            The value of the field from the internal _data cache.

        Raises:
            AttributeError: If the attribute doesn't exist in _data after
                           fetching (if necessary).

        State Transitions:
            ATTACHED -> LOADED: Automatically fetches data on first access.

        Example:
            user = db.doc('users/alovelace')  # ATTACHED
            name = user.name  # Triggers fetch, transitions to LOADED
            year = user.year  # No fetch needed, already LOADED
        """
        # Check if we're accessing internal data
        # (Raising here also breaks the infinite recursion that would occur
        # if _data were looked up before it exists, since _materialize_field
        # reads self._data.)
        if name == '_data':
            raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")

        # If we're in ATTACHED state, trigger lazy loading
        if self._state == State.ATTACHED:
            # Synchronous fetch for lazy loading
            self.fetch()

        return self._materialize_field(name)

    # =========================================================================
    # Core Lifecycle Methods (Sync-specific I/O)
    # =========================================================================

    def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'FireObject':
        """
        Fetch document data from Firestore (synchronous).

        Retrieves the latest data from Firestore and populates the internal
        _data cache. This method transitions ATTACHED objects to LOADED state
        and can refresh data for already-LOADED objects.

        Args:
            force: If True, fetch data even if already LOADED. Useful for
                  refreshing data to get latest changes from Firestore.
                  Default is False.
            transaction: Optional transaction object for transactional reads.
                        If provided, the read will be part of the transaction.

        Returns:
            Self, to allow method chaining.

        Raises:
            ValueError: If called on a DETACHED object (no DocumentReference).
            RuntimeError: If called on a DELETED object.
            NotFound: If document doesn't exist in Firestore.

        State Transitions:
            ATTACHED -> LOADED: First fetch populates data
            LOADED -> LOADED: Refreshes data if force=True

        Example:
            # Normal fetch
            user = db.doc('users/alovelace')  # ATTACHED
            user.fetch()  # Now LOADED with data

            # Transactional fetch
            transaction = db.transaction()
            @firestore.transactional
            def read_user(transaction):
                user.fetch(transaction=transaction)
                return user.credits
            credits = read_user(transaction)
        """
        # Base-class guard decides whether a network read is actually needed.
        if self._should_skip_fetch(force):
            return self

        snapshot = self._get_snapshot(transaction)
        self._process_snapshot(snapshot, is_async=False)

        return self

    def save(
        self,
        doc_id: Optional[str] = None,
        transaction: Optional[Any] = None,
        batch: Optional[Any] = None,
    ) -> 'FireObject':
        """
        Save the object's data to Firestore (synchronous).

        Creates or updates the Firestore document based on the object's
        current state. For DETACHED objects, creates a new document. For
        LOADED objects, performs a full overwrite (Phase 1).

        Args:
            doc_id: Optional custom document ID. Only used when saving a
                   DETACHED object. If None, Firestore auto-generates an ID.
            transaction: Optional transaction object for transactional writes.
                        If provided, the write will be part of the transaction.
            batch: Optional batch object for batched writes. If provided,
                  the write will be accumulated in the batch (committed later).

        Returns:
            Self, to allow method chaining.

        Raises:
            RuntimeError: If called on a DELETED object.
            ValueError: If DETACHED object has no parent collection, or if
                       trying to create a new document within a transaction or batch.

        State Transitions:
            DETACHED -> LOADED: Creates new document with doc_id or auto-ID
            LOADED -> LOADED: Updates document if dirty, no-op if clean

        Example:
            # Create new document
            user = collection.new()
            user.name = 'Ada'
            user.save(doc_id='alovelace')  # DETACHED -> LOADED

            # Update existing
            user.year = 1816
            user.save()  # Performs update

            # Transactional save
            transaction = db.transaction()
            @firestore.transactional
            def update_user(transaction):
                user.fetch(transaction=transaction)
                user.credits += 10
                user.save(transaction=transaction)
            update_user(transaction)

            # Batch save
            batch = db.batch()
            user1.save(batch=batch)
            user2.save(batch=batch)
            batch.commit()  # Commit all operations
        """
        self._validate_not_deleted("save()")

        if self._state == State.DETACHED:
            # NOTE(review): transaction/batch are passed to the preparation
            # helper but not forwarded to _write_set; presumably
            # _prepare_detached_save rejects them for new documents (see
            # Raises above) — confirm in BaseFireObject.
            doc_ref, storage_data = self._prepare_detached_save(doc_id, transaction, batch)
            self._write_set(storage_data, doc_ref=doc_ref)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        if self._state == State.LOADED:
            # Clean objects need no write at all.
            if not self.is_dirty():
                return self

            update_dict = self._build_update_dict()
            self._write_update(update_dict, transaction=transaction, batch=batch)
            self._mark_clean()
            return self

        if self._state == State.ATTACHED:
            # Data was never fetched: write whatever is locally staged.
            storage_data = self._prepare_data_for_storage()
            self._write_set(storage_data, transaction=transaction, batch=batch)
            object.__setattr__(self, '_state', State.LOADED)
            self._mark_clean()
            return self

        return self

    def collections(self, names_only: bool = False) -> List[Any]:
        """
        List subcollections beneath this document.

        Args:
            names_only: When True, return collection IDs instead of wrappers.

        Returns:
            List of subcollection names or FireCollection wrappers.
        """
        self._validate_not_detached("collections()")
        self._validate_not_deleted("collections()")

        subcollections = list(self._doc_ref.collections())
        if names_only:
            return [col.id for col in subcollections]

        return [self.collection(col.id) for col in subcollections]

    def delete(
        self,
        batch: Optional[Any] = None,
        *,
        recursive: bool = True,
        batch_size: int = 50,
    ) -> None:
        """
        Delete the document from Firestore (synchronous).

        Removes the document from Firestore and transitions the object to
        DELETED state. After deletion, the object retains its ID and path
        for reference but cannot be modified or saved.

        Args:
            batch: Optional batch object for batched deletes. If provided,
                  the delete will be accumulated in the batch (committed later).
            recursive: When True (default), delete all subcollections first.
            batch_size: Batch size to use for recursive subcollection cleanup.

        Raises:
            ValueError: If called on a DETACHED object (no document to delete).
            RuntimeError: If called on an already-DELETED object.
            ValueError: If recursive deletion is requested while using a batch.

        State Transitions:
            ATTACHED -> DELETED: Deletes document (data never loaded)
            LOADED -> DELETED: Deletes document (data was loaded)

        Example:
            user = db.doc('users/alovelace')
            user.delete()  # Document removed from Firestore
            print(user.state)  # State.DELETED
            print(user.id)  # Still accessible: 'alovelace'

            # Batch delete
            batch = db.batch()
            user1.delete(batch=batch, recursive=False)
            user2.delete(batch=batch, recursive=False)
            batch.commit()  # Commit all operations
        """
        if recursive:
            # Recursive cleanup issues its own commits, so it cannot be
            # combined with a caller-managed batch.
            if batch is not None:
                raise ValueError("Cannot delete recursively as part of a batch.")
            if batch_size <= 0:
                raise ValueError(f"batch_size must be positive, got {batch_size}")
            # Clear descendants before removing the parent document.
            self._delete_descendant_collections(batch_size=batch_size)

        self._prepare_delete()
        self._write_delete(batch=batch)
        self._transition_to_deleted()

    def _delete_descendant_collections(self, batch_size: int) -> None:
        """Delete all subcollections beneath this document."""
        for name in self.collections(names_only=True):
            subcollection = self.collection(name)
            subcollection.delete_all(batch_size=batch_size, recursive=True)

    # =========================================================================
    # Subcollection Utilities
    # =========================================================================

    def delete_subcollection(
        self,
        name: str,
        *,
        batch_size: int = 50,
        recursive: bool = True,
        dry_run: bool = False,
    ) -> Dict[str, int]:
        """
        Delete a subcollection beneath this document.

        Firestore keeps subcollections even after their parent document is
        deleted. This helper clears a specific subcollection using the same
        batched logic as FireCollection.delete_all().

        Args:
            name: Subcollection name relative to this document.
            batch_size: Maximum number of deletes per commit.
            recursive: Whether to delete nested subcollections.
            dry_run: Count affected documents without executing writes.

        Returns:
            Dictionary with counts for deleted documents and subcollections.
        """
        subcollection = self.collection(name)
        return subcollection.delete_all(
            batch_size=batch_size,
            recursive=recursive,
            dry_run=dry_run,
        )

    # =========================================================================
    # Factory Methods
    # =========================================================================

    @classmethod
    def from_snapshot(
        cls,
        snapshot: DocumentSnapshot,
        parent_collection: Optional[Any] = None
    ) -> 'FireObject':
        """
        Create a FireObject from a Firestore DocumentSnapshot.

        This factory method is the primary "hydration" mechanism for
        converting native Firestore query results into FireObject instances.
        It creates an object in LOADED state with data already populated.

        Args:
            snapshot: A DocumentSnapshot from google-cloud-firestore, typically
                     obtained from query results or document.get().
            parent_collection: Optional reference to parent FireCollection.

        Returns:
            A new FireObject instance in LOADED state with data from snapshot.

        Raises:
            ValueError: If snapshot doesn't exist (snapshot.exists is False).

        Example:
            # Hydrate from native query
            native_query = client.collection('users').where('year', '>', 1800)
            results = [FireObject.from_snapshot(snap)
                      for snap in native_query.stream()]

            # Hydrate from direct get
            snap = client.document('users/alovelace').get()
            user = FireObject.from_snapshot(snap)
        """
        # Use base class helper to extract snapshot data
        init_params = cls._create_from_snapshot_base(snapshot, parent_collection)

        # Create FireObject in LOADED state
        obj = cls(
            doc_ref=init_params['doc_ref'],
            initial_state=init_params['initial_state'],
            parent_collection=init_params['parent_collection']
        )

        # Populate data from snapshot
        # (object.__setattr__ bypasses the dirty-tracking proxy so hydrated
        # fields are not flagged as pending writes.)
        object.__setattr__(obj, '_data', init_params['data'])

        return obj

__getattr__(name)

Handle attribute access for document fields with lazy loading.

This method implements lazy loading: if the object is in ATTACHED state, accessing any data attribute will automatically trigger a fetch() to load the data from Firestore.

Args: name: The attribute name being accessed.

Returns: The value of the field from the internal _data cache.

Raises: AttributeError: If the attribute doesn't exist in _data after fetching (if necessary).

State Transitions: ATTACHED -> LOADED: Automatically fetches data on first access.

Example: user = db.doc('users/alovelace') # ATTACHED name = user.name # Triggers fetch, transitions to LOADED year = user.year # No fetch needed, already LOADED

Source code in src/fire_prox/fire_object.py
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
def __getattr__(self, name: str) -> Any:
    """
    Resolve document fields as attributes, lazily fetching when needed.

    If the object is still in ATTACHED state (reference known, data not
    yet loaded), the first field access transparently calls fetch() so
    the caller never has to load data explicitly.

    Args:
        name: Name of the field being looked up.

    Returns:
        The field's value from the internal _data cache.

    Raises:
        AttributeError: If the field is absent from _data even after a
                       fetch (when one was triggered).

    State Transitions:
        ATTACHED -> LOADED: Automatically fetches data on first access.

    Example:
        user = db.doc('users/alovelace')  # ATTACHED
        name = user.name  # Triggers fetch, transitions to LOADED
        year = user.year  # No fetch needed, already LOADED
    """
    # '_data' itself must never be resolved here: before the cache is
    # initialised, looking it up would re-enter __getattr__ forever.
    if name == '_data':
        message = f"'{type(self).__name__}' object has no attribute '{name}'"
        raise AttributeError(message)

    # Lazy-load on first field access for an ATTACHED object.
    if self._state == State.ATTACHED:
        self.fetch()

    return self._materialize_field(name)

collections(names_only=False)

List subcollections beneath this document.

Args: names_only: When True, return collection IDs instead of wrappers.

Returns: List of subcollection names or FireCollection wrappers.

Source code in src/fire_prox/fire_object.py
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
def collections(self, names_only: bool = False) -> List[Any]:
    """
    Enumerate the subcollections that live beneath this document.

    Args:
        names_only: If True, return just the collection IDs rather than
                   FireCollection wrappers.

    Returns:
        A list of subcollection IDs (strings) or FireCollection wrappers.
    """
    # Listing subcollections requires a live, non-deleted document ref.
    self._validate_not_detached("collections()")
    self._validate_not_deleted("collections()")

    results: List[Any] = []
    for native_col in self._doc_ref.collections():
        results.append(
            native_col.id if names_only else self.collection(native_col.id)
        )
    return results

delete(batch=None, *, recursive=True, batch_size=50)

Delete the document from Firestore (synchronous).

Removes the document from Firestore and transitions the object to DELETED state. After deletion, the object retains its ID and path for reference but cannot be modified or saved.

Args: batch: Optional batch object for batched deletes. If provided, the delete will be accumulated in the batch (committed later). recursive: When True (default), delete all subcollections first. batch_size: Batch size to use for recursive subcollection cleanup.

Raises: ValueError: If called on a DETACHED object (no document to delete). RuntimeError: If called on an already-DELETED object. ValueError: If recursive deletion is requested while using a batch.

State Transitions: ATTACHED -> DELETED: Deletes document (data never loaded) LOADED -> DELETED: Deletes document (data was loaded)

Example: user = db.doc('users/alovelace') user.delete() # Document removed from Firestore print(user.state) # State.DELETED print(user.id) # Still accessible: 'alovelace'

# Batch delete
batch = db.batch()
user1.delete(batch=batch, recursive=False)
user2.delete(batch=batch, recursive=False)
batch.commit()  # Commit all operations
Source code in src/fire_prox/fire_object.py
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
def delete(
    self,
    batch: Optional[Any] = None,
    *,
    recursive: bool = True,
    batch_size: int = 50,
) -> None:
    """
    Delete this document from Firestore (synchronous).

    The object transitions to DELETED state afterwards: its ID and path
    stay readable for reference, but it can no longer be modified or
    saved.

    Args:
        batch: Optional write batch. When given, the delete is queued on
              the batch and only applied at batch.commit() time.
        recursive: Delete every subcollection first (default True).
        batch_size: Commit size used while clearing subcollections.

    Raises:
        ValueError: If the object is DETACHED (nothing to delete), if
                   batch_size is not positive, or if a recursive delete
                   is combined with a batch.
        RuntimeError: If the object was already deleted.

    State Transitions:
        ATTACHED -> DELETED: Deletes document (data never loaded)
        LOADED -> DELETED: Deletes document (data was loaded)

    Example:
        user = db.doc('users/alovelace')
        user.delete()  # Document removed from Firestore
        print(user.state)  # State.DELETED
        print(user.id)  # Still accessible: 'alovelace'

        # Batch delete
        batch = db.batch()
        user1.delete(batch=batch, recursive=False)
        user2.delete(batch=batch, recursive=False)
        batch.commit()  # Commit all operations
    """
    # Recursive cleanup issues its own commits, so it cannot be deferred
    # into a caller-controlled batch.
    if recursive and batch is not None:
        raise ValueError("Cannot delete recursively as part of a batch.")

    if recursive:
        if batch_size <= 0:
            raise ValueError(f"batch_size must be positive, got {batch_size}")
        # Clear nested subcollections before removing the document itself.
        self._delete_descendant_collections(batch_size=batch_size)

    self._prepare_delete()
    self._write_delete(batch=batch)
    self._transition_to_deleted()

delete_subcollection(name, *, batch_size=50, recursive=True, dry_run=False)

Delete a subcollection beneath this document.

Firestore keeps subcollections even after their parent document is deleted. This helper clears a specific subcollection using the same batched logic as FireCollection.delete_all().

Args: name: Subcollection name relative to this document. batch_size: Maximum number of deletes per commit. recursive: Whether to delete nested subcollections. dry_run: Count affected documents without executing writes.

Returns: Dictionary with counts for deleted documents and subcollections.

Source code in src/fire_prox/fire_object.py
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
def delete_subcollection(
    self,
    name: str,
    *,
    batch_size: int = 50,
    recursive: bool = True,
    dry_run: bool = False,
) -> Dict[str, int]:
    """
    Remove one named subcollection beneath this document.

    Firestore does not cascade: subcollections survive even when the
    parent document is deleted. This delegates to the subcollection's
    delete_all(), reusing the same batched deletion machinery as
    FireCollection.delete_all().

    Args:
        name: Subcollection name relative to this document.
        batch_size: Maximum number of deletes per commit.
        recursive: Also clear subcollections nested below it.
        dry_run: Only count affected documents; perform no writes.

    Returns:
        Dictionary with counts for deleted documents and subcollections.
    """
    return self.collection(name).delete_all(
        batch_size=batch_size,
        recursive=recursive,
        dry_run=dry_run,
    )

fetch(force=False, transaction=None)

Fetch document data from Firestore (synchronous).

Retrieves the latest data from Firestore and populates the internal _data cache. This method transitions ATTACHED objects to LOADED state and can refresh data for already-LOADED objects.

Args: force: If True, fetch data even if already LOADED. Useful for refreshing data to get latest changes from Firestore. Default is False. transaction: Optional transaction object for transactional reads. If provided, the read will be part of the transaction.

Returns: Self, to allow method chaining.

Raises: ValueError: If called on a DETACHED object (no DocumentReference). RuntimeError: If called on a DELETED object. NotFound: If document doesn't exist in Firestore.

State Transitions: ATTACHED -> LOADED: First fetch populates data LOADED -> LOADED: Refreshes data if force=True

Example: # Normal fetch user = db.doc('users/alovelace') # ATTACHED user.fetch() # Now LOADED with data

# Transactional fetch
transaction = db.transaction()
@firestore.transactional
def read_user(transaction):
    user.fetch(transaction=transaction)
    return user.credits
credits = read_user(transaction)
Source code in src/fire_prox/fire_object.py
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
def fetch(self, force: bool = False, transaction: Optional[Any] = None) -> 'FireObject':
    """
    Load this document's data from Firestore (synchronous).

    Pulls the current snapshot from Firestore into the internal _data
    cache. Moves ATTACHED objects to LOADED; already-LOADED objects are
    refreshed only when force=True.

    Args:
        force: Re-fetch even when the object is already LOADED, picking
              up the latest server-side changes. Defaults to False.
        transaction: Optional transaction; when supplied the read
                    happens inside that transaction.

    Returns:
        Self, enabling call chaining.

    Raises:
        ValueError: If the object is DETACHED (no DocumentReference).
        RuntimeError: If the object is DELETED.
        NotFound: If the document does not exist in Firestore.

    State Transitions:
        ATTACHED -> LOADED: First fetch populates data
        LOADED -> LOADED: Refreshes data if force=True

    Example:
        # Normal fetch
        user = db.doc('users/alovelace')  # ATTACHED
        user.fetch()  # Now LOADED with data

        # Transactional fetch
        transaction = db.transaction()
        @firestore.transactional
        def read_user(transaction):
            user.fetch(transaction=transaction)
            return user.credits
        credits = read_user(transaction)
    """
    # Skip the round trip when data is already cached and not forced.
    if not self._should_skip_fetch(force):
        snapshot = self._get_snapshot(transaction)
        self._process_snapshot(snapshot, is_async=False)
    return self

from_snapshot(snapshot, parent_collection=None) classmethod

Create a FireObject from a Firestore DocumentSnapshot.

This factory method is the primary "hydration" mechanism for converting native Firestore query results into FireObject instances. It creates an object in LOADED state with data already populated.

Args: snapshot: A DocumentSnapshot from google-cloud-firestore, typically obtained from query results or document.get(). parent_collection: Optional reference to parent FireCollection.

Returns: A new FireObject instance in LOADED state with data from snapshot.

Raises: ValueError: If snapshot doesn't exist (snapshot.exists is False).

Example: # Hydrate from native query native_query = client.collection('users').where('year', '>', 1800) results = [FireObject.from_snapshot(snap) for snap in native_query.stream()]

# Hydrate from direct get
snap = client.document('users/alovelace').get()
user = FireObject.from_snapshot(snap)
Source code in src/fire_prox/fire_object.py
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
@classmethod
def from_snapshot(
    cls,
    snapshot: DocumentSnapshot,
    parent_collection: Optional[Any] = None
) -> 'FireObject':
    """
    Build a FireObject directly from a Firestore DocumentSnapshot.

    This is the primary "hydration" path for turning native query
    results into FireObject instances: the returned object is already
    in LOADED state with its data populated, so no further fetch is
    required.

    Args:
        snapshot: A DocumentSnapshot from google-cloud-firestore,
                 e.g. from query results or document.get().
        parent_collection: Optional reference to parent FireCollection.

    Returns:
        A new FireObject in LOADED state carrying the snapshot's data.

    Raises:
        ValueError: If the snapshot does not exist
                   (snapshot.exists is False).

    Example:
        # Hydrate from native query
        native_query = client.collection('users').where('year', '>', 1800)
        results = [FireObject.from_snapshot(snap)
                  for snap in native_query.stream()]

        # Hydrate from direct get
        snap = client.document('users/alovelace').get()
        user = FireObject.from_snapshot(snap)
    """
    # Shared base-class helper validates the snapshot and extracts the
    # constructor parameters plus the field data.
    params = cls._create_from_snapshot_base(snapshot, parent_collection)

    instance = cls(
        doc_ref=params['doc_ref'],
        initial_state=params['initial_state'],
        parent_collection=params['parent_collection'],
    )

    # Bypass __setattr__ so the raw snapshot data lands in the cache
    # without being treated as user field writes.
    object.__setattr__(instance, '_data', params['data'])

    return instance

save(doc_id=None, transaction=None, batch=None)

Save the object's data to Firestore (synchronous).

Creates or updates the Firestore document based on the object's current state. For DETACHED objects, creates a new document. For LOADED objects, performs a full overwrite (Phase 1).

Args: doc_id: Optional custom document ID. Only used when saving a DETACHED object. If None, Firestore auto-generates an ID. transaction: Optional transaction object for transactional writes. If provided, the write will be part of the transaction. batch: Optional batch object for batched writes. If provided, the write will be accumulated in the batch (committed later).

Returns: Self, to allow method chaining.

Raises: RuntimeError: If called on a DELETED object. ValueError: If DETACHED object has no parent collection, or if trying to create a new document within a transaction or batch.

State Transitions: DETACHED -> LOADED: Creates new document with doc_id or auto-ID LOADED -> LOADED: Updates document if dirty, no-op if clean

Example: # Create new document user = collection.new() user.name = 'Ada' user.save(doc_id='alovelace') # DETACHED -> LOADED

# Update existing
user.year = 1816
user.save()  # Performs update

# Transactional save
transaction = db.transaction()
@firestore.transactional
def update_user(transaction):
    user.fetch(transaction=transaction)
    user.credits += 10
    user.save(transaction=transaction)
update_user(transaction)

# Batch save
batch = db.batch()
user1.save(batch=batch)
user2.save(batch=batch)
batch.commit()  # Commit all operations
Source code in src/fire_prox/fire_object.py
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
def save(
    self,
    doc_id: Optional[str] = None,
    transaction: Optional[Any] = None,
    batch: Optional[Any] = None,
) -> 'FireObject':
    """
    Persist this object's data to Firestore (synchronous).

    Behavior depends on the object's lifecycle state: DETACHED objects
    create a new document; LOADED objects write only their accumulated
    changes (no-op when clean); ATTACHED objects write their full data
    with a set.

    Args:
        doc_id: Custom document ID, honored only for DETACHED objects.
               When None, Firestore auto-generates an ID.
        transaction: Optional transaction; the write joins it.
        batch: Optional write batch; the write is queued and applied at
              batch.commit() time.

    Returns:
        Self, enabling call chaining.

    Raises:
        RuntimeError: If called on a DELETED object.
        ValueError: If a DETACHED object has no parent collection, or a
                   new document is created inside a transaction/batch.

    State Transitions:
        DETACHED -> LOADED: Creates new document with doc_id or auto-ID
        LOADED -> LOADED: Updates document if dirty, no-op if clean

    Example:
        # Create new document
        user = collection.new()
        user.name = 'Ada'
        user.save(doc_id='alovelace')  # DETACHED -> LOADED

        # Update existing
        user.year = 1816
        user.save()  # Performs update

        # Transactional save
        transaction = db.transaction()
        @firestore.transactional
        def update_user(transaction):
            user.fetch(transaction=transaction)
            user.credits += 10
            user.save(transaction=transaction)
        update_user(transaction)

        # Batch save
        batch = db.batch()
        user1.save(batch=batch)
        user2.save(batch=batch)
        batch.commit()  # Commit all operations
    """
    self._validate_not_deleted("save()")

    state = self._state

    if state == State.DETACHED:
        # First save: resolve the target reference (custom or auto ID)
        # and write the full payload.
        doc_ref, payload = self._prepare_detached_save(doc_id, transaction, batch)
        self._write_set(payload, doc_ref=doc_ref)
        # Bypass __setattr__ so the state flip isn't recorded as a
        # user field write.
        object.__setattr__(self, '_state', State.LOADED)
        self._mark_clean()
    elif state == State.LOADED:
        # Only touch the server when something actually changed.
        if self.is_dirty():
            self._write_update(
                self._build_update_dict(), transaction=transaction, batch=batch
            )
            self._mark_clean()
    elif state == State.ATTACHED:
        # Data was never loaded; overwrite the document wholesale.
        self._write_set(
            self._prepare_data_for_storage(), transaction=transaction, batch=batch
        )
        object.__setattr__(self, '_state', State.LOADED)
        self._mark_clean()

    return self

fire_query

FireQuery: Chainable query builder for Firestore (synchronous).

This module provides the synchronous FireQuery class, which wraps native Firestore Query objects and provides a chainable interface for building and executing queries.

FireQuery

A chainable query builder for Firestore collections (synchronous).

FireQuery wraps the native google-cloud-firestore Query object and provides a simplified, chainable interface for building and executing queries. It follows an immutable pattern - each method returns a new FireQuery instance with the modified query.

This is the synchronous implementation. For async queries, use AsyncFireQuery.

Usage Examples: # Basic filtering query = users.where('birth_year', '>', 1800) for user in query.get(): print(user.name)

# Chaining multiple conditions
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England')
         .order_by('birth_year')
         .limit(10))
for user in query.get():
    print(f"{user.name} - {user.birth_year}")

# Stream results (generator)
for user in users.where('active', '==', True).stream():
    print(user.name)

Design Note: For complex queries beyond the scope of this builder (e.g., OR queries, advanced filtering), use the native Query API directly and hydrate results with FireObject.from_snapshot():

    native_query = client.collection('users').where(...)
    results = [FireObject.from_snapshot(snap) for snap in native_query.stream()]
Source code in src/fire_prox/fire_query.py
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
class FireQuery:
    """
    A chainable query builder for Firestore collections (synchronous).

    FireQuery wraps the native google-cloud-firestore Query object and provides
    a simplified, chainable interface for building and executing queries. It
    follows an immutable pattern - each method returns a new FireQuery instance
    with the modified query.

    This is the synchronous implementation. For async queries, use AsyncFireQuery.

    Usage Examples:
        # Basic filtering
        query = users.where('birth_year', '>', 1800)
        for user in query.get():
            print(user.name)

        # Chaining multiple conditions
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England')
                 .order_by('birth_year')
                 .limit(10))
        for user in query.get():
            print(f"{user.name} - {user.birth_year}")

        # Stream results (generator)
        for user in users.where('active', '==', True).stream():
            print(user.name)

    Design Note:
        For complex queries beyond the scope of this builder (e.g., OR queries,
        advanced filtering), use the native Query API directly and hydrate results
        with FireObject.from_snapshot():

            native_query = client.collection('users').where(...)
            results = [FireObject.from_snapshot(snap) for snap in native_query.stream()]
    """

    def __init__(
        self,
        native_query: 'Query',
        parent_collection: Optional[Any] = None,
        projection: Optional[tuple] = None,
    ):
        """
        Initialize a FireQuery.

        Args:
            native_query: The underlying native Query object from google-cloud-firestore.
            parent_collection: Optional reference to parent FireCollection.
            projection: Optional tuple of field paths to project (select specific fields).
        """
        # The wrapped native query. Never mutated in place: every builder
        # method derives a fresh native query and wraps it in a new FireQuery.
        self._query = native_query
        self._parent_collection = parent_collection
        # Tuple of selected field paths when .select() is active, else None.
        # Presence of a projection changes get()/stream() output to dicts.
        self._projection = projection

    # =========================================================================
    # Internal Helpers
    # =========================================================================

    def _derive(self, native_query: Any, projection: Optional[tuple] = None) -> 'FireQuery':
        """
        Wrap a derived native query in a new FireQuery.

        Centralizes the immutable-builder construction so every chainable
        method preserves the parent collection and, unless overridden, the
        active projection.

        Args:
            native_query: The new native Query produced by a builder call.
            projection: Optional projection tuple; defaults to the current one.

        Returns:
            A new FireQuery sharing this query's collection context.
        """
        if projection is None:
            projection = self._projection
        return FireQuery(native_query, self._parent_collection, projection)

    @staticmethod
    def _extract_scalar(result: Any, default: 'Union[int, float]') -> 'Union[int, float]':
        """
        Extract the single scalar from a native aggregation result.

        Args:
            result: Return value of AggregationQuery.get() - a list of lists
                of AggregationResult objects (one inner list per result row).
            default: Value to return when there are no result rows or the
                aggregated value is null (e.g. sum over all-null fields).

        Returns:
            The first aggregation value, or `default` when absent/null.
        """
        for agg_result in result or []:
            value = agg_result[0].value
            # Firestore reports null for aggregations over missing/null data;
            # normalize to the documented numeric default.
            return value if value is not None else default
        return default

    @staticmethod
    def _attach_aggregation(target: Any, alias: str, spec: Any,
                            count_cls: type, sum_cls: type, avg_cls: type) -> Any:
        """
        Attach one named aggregation to a native query or aggregation query.

        Both the native Query and AggregationQuery expose the same
        count/sum/avg builder methods, so a single dispatcher serves the
        first and all subsequent aggregations in aggregate().

        Args:
            target: Native Query or AggregationQuery to extend.
            alias: Result name for this aggregation.
            spec: A Count, Sum, or Avg instance describing the aggregation.
            count_cls, sum_cls, avg_cls: The aggregation marker classes
                (passed in because they are imported lazily by the caller).

        Returns:
            The extended native AggregationQuery.

        Raises:
            ValueError: If `spec` is not a recognized aggregation type, or a
                Sum/Avg is missing its field name.
        """
        if isinstance(spec, count_cls):
            return target.count(alias=alias)
        if isinstance(spec, sum_cls):
            if not spec.field:
                raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
            return target.sum(spec.field, alias=alias)
        if isinstance(spec, avg_cls):
            if not spec.field:
                raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
            return target.avg(spec.field, alias=alias)
        raise ValueError(
            f"Invalid aggregation type for '{alias}': {type(spec).__name__}. "
            f"Use Count(), Sum(field), or Avg(field)"
        )

    # =========================================================================
    # Query Building Methods (Immutable Pattern)
    # =========================================================================

    def where(self, field: str, op: str, value: Any) -> 'FireQuery':
        """
        Add a filter condition to the query.

        Creates a new FireQuery with an additional filter condition.
        Uses the immutable pattern - returns a new instance rather than
        modifying the current query.

        Args:
            field: The field path to filter on (e.g., 'name', 'address.city').
            op: Comparison operator. Supported operators:
                '==' (equal), '!=' (not equal),
                '<' (less than), '<=' (less than or equal),
                '>' (greater than), '>=' (greater than or equal),
                'in' (value in list), 'not-in' (value not in list),
                'array-contains' (array contains value),
                'array-contains-any' (array contains any of the values).
            value: The value to compare against.

        Returns:
            A new FireQuery instance with the added filter.

        Example:
            # Single condition
            query = users.where('birth_year', '>', 1800)

            # Multiple conditions (chained)
            query = (users
                     .where('birth_year', '>', 1800)
                     .where('country', '==', 'England'))
        """
        # FieldFilter is the non-deprecated keyword form of Query.where().
        filter_obj = FieldFilter(field, op, value)
        return self._derive(self._query.where(filter=filter_obj))

    def order_by(self, field: str, direction: str = 'ASCENDING') -> 'FireQuery':
        """
        Add an ordering clause to the query.

        Creates a new FireQuery with ordering by the specified field.

        Args:
            field: The field path to order by.
            direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'
                      (case-insensitive). Default is 'ASCENDING'.

        Returns:
            A new FireQuery instance with the ordering applied.

        Raises:
            ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.

        Example:
            # Ascending order
            query = users.order_by('birth_year')

            # Descending order
            query = users.order_by('birth_year', direction='DESCENDING')

            # Multiple orderings (chained)
            query = (users
                     .order_by('country')
                     .order_by('birth_year', direction='DESCENDING'))
        """
        direction_key = direction.upper()
        # Validate before the deferred import so an invalid direction fails
        # fast with ValueError regardless of import availability.
        if direction_key not in ('ASCENDING', 'DESCENDING'):
            raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

        # Deferred import keeps the direction constants out of module import time.
        from google.cloud.firestore_v1 import Query as NativeQuery
        direction_const = (
            NativeQuery.ASCENDING if direction_key == 'ASCENDING' else NativeQuery.DESCENDING
        )
        return self._derive(self._query.order_by(field, direction=direction_const))

    def limit(self, count: int) -> 'FireQuery':
        """
        Limit the number of results returned.

        Creates a new FireQuery that will return at most `count` results.

        Args:
            count: Maximum number of documents to return. Must be positive.

        Returns:
            A new FireQuery instance with the limit applied.

        Raises:
            ValueError: If count is not positive.

        Example:
            # Get top 10 results
            query = users.order_by('score', direction='DESCENDING').limit(10)

            # Get first 5 matching documents
            query = users.where('active', '==', True).limit(5)
        """
        if count <= 0:
            raise ValueError(f"Limit count must be positive, got {count}")
        return self._derive(self._query.limit(count))

    def start_at(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        Start query results at a cursor position (inclusive).

        Creates a new FireQuery that starts at the specified cursor. The cursor
        can be a document snapshot or a dictionary of field values matching the
        order_by fields.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot from a previous query
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the start cursor applied.

        Example:
            # Using field values (requires matching order_by)
            query = users.order_by('age').start_at({'age': 25})

            # Pagination: get first page, then start at last document
            page1 = users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = users.order_by('age').start_at({'age': last_age}).limit(10).get()

            # Using a document snapshot
            last_doc_ref = page1[-1]._doc_ref
            last_snapshot = last_doc_ref.get()
            page2 = users.order_by('age').start_at(last_snapshot).limit(10).get()
        """
        return self._derive(self._query.start_at(*document_fields_or_snapshot))

    def start_after(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        Start query results after a cursor position (exclusive).

        Creates a new FireQuery that starts after the specified cursor. The cursor
        document itself is excluded from results. This is typically used for
        pagination to avoid duplicating the last document from the previous page.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot from a previous query
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the start-after cursor applied.

        Example:
            # Pagination: exclude the last document from previous page
            page1 = users.order_by('age').limit(10).get()
            last_age = page1[-1].age
            page2 = users.order_by('age').start_after({'age': last_age}).limit(10).get()

            # Using a document snapshot (common pattern)
            last_doc_ref = page1[-1]._doc_ref
            last_snapshot = last_doc_ref.get()
            page2 = users.order_by('age').start_after(last_snapshot).limit(10).get()
        """
        return self._derive(self._query.start_after(*document_fields_or_snapshot))

    def end_at(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        End query results at a cursor position (inclusive).

        Creates a new FireQuery that ends at the specified cursor. The cursor
        document is included in the results.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the end cursor applied.

        Example:
            # Get all users up to and including age 50
            query = users.order_by('age').end_at({'age': 50})

            # Using a specific document as endpoint
            target_doc_ref = users.doc('user123')._doc_ref
            target_snapshot = target_doc_ref.get()
            query = users.order_by('age').end_at(target_snapshot)
        """
        return self._derive(self._query.end_at(*document_fields_or_snapshot))

    def end_before(self, *document_fields_or_snapshot) -> 'FireQuery':
        """
        End query results before a cursor position (exclusive).

        Creates a new FireQuery that ends before the specified cursor. The cursor
        document itself is excluded from results.

        Args:
            *document_fields_or_snapshot: Either:
                - A dictionary of field values: {'field': value}
                - A DocumentSnapshot
                - Direct field values matching order_by clause order

        Returns:
            A new FireQuery instance with the end-before cursor applied.

        Example:
            # Get all users before age 50 (exclude 50)
            query = users.order_by('age').end_before({'age': 50})

            # Using a specific document as exclusive endpoint
            target_doc_ref = users.doc('user123')._doc_ref
            target_snapshot = target_doc_ref.get()
            query = users.order_by('age').end_before(target_snapshot)
        """
        return self._derive(self._query.end_before(*document_fields_or_snapshot))

    def select(self, *field_paths: str) -> 'FireQuery':
        """
        Select specific fields to return (projection).

        Creates a new FireQuery that only returns the specified fields in the
        query results. When using projections, query results will be returned
        as vanilla dictionaries instead of FireObject instances. Any
        DocumentReferences in the returned dictionaries will be automatically
        converted to FireObject instances in ATTACHED state.

        Args:
            *field_paths: One or more field paths to select. Field paths can
                         include nested fields using dot notation (e.g., 'address.city').

        Returns:
            A new FireQuery instance with the projection applied.

        Raises:
            ValueError: If no field paths are provided.

        Example:
            # Select a single field
            query = users.select('name')
            results = query.get()
            # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

            # Select multiple fields
            query = users.select('name', 'email', 'birth_year')

            # Select with filtering and ordering
            query = (users
                     .where('birth_year', '>', 1990)
                     .select('name', 'birth_year')
                     .order_by('birth_year')
                     .limit(10))

            # DocumentReferences are auto-converted to FireObjects
            query = posts.select('title', 'author')  # author is a DocumentReference
            results = query.get()
            # results[0]['author'] is a FireObject, not a DocumentReference

        Note:
            - Projection queries return dictionaries, not FireObject instances
            - Only the selected fields will be present in the returned dictionaries
            - DocumentReferences are automatically hydrated to FireObject instances
            - Projected results are more bandwidth-efficient for large documents
        """
        if not field_paths:
            raise ValueError("select() requires at least one field path")

        # Recording the projection switches get()/stream() into dict mode.
        return self._derive(self._query.select(list(field_paths)), projection=field_paths)

    def find_nearest(
        self,
        vector_field: str,
        query_vector: Any,
        distance_measure: Any,
        limit: int,
        distance_result_field: Optional[str] = None,
    ) -> 'FireQuery':
        """
        Find the nearest neighbors based on vector similarity.

        Performs a vector similarity search on top of the current query filters.
        This allows you to combine pre-filtering with vector search (requires
        a composite index).

        Args:
            vector_field: Name of the field containing vector embeddings.
            query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector).
            distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN,
                DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT).
            limit: Maximum number of nearest neighbors to return (max 1000).
            distance_result_field: Optional field name to store the calculated distance
                in the query results.

        Returns:
            A new FireQuery instance with the vector search applied.

        Example:
            from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
            from google.cloud.firestore_v1.vector import Vector

            # Find nearest neighbors with pre-filtering
            query = (collection
                     .where('category', '==', 'tech')
                     .find_nearest(
                         vector_field="embedding",
                         query_vector=Vector([0.1, 0.2, 0.3]),
                         distance_measure=DistanceMeasure.COSINE,
                         limit=5
                     ))
            for doc in query.get():
                print(f"{doc.title}: {doc.category}")

        Note:
            - Requires a composite index when combining with where() clauses
            - Maximum limit is 1000 documents
            - Does not work with Firestore emulator (production only)
        """
        vector_query = self._query.find_nearest(
            vector_field=vector_field,
            query_vector=query_vector,
            distance_measure=distance_measure,
            limit=limit,
            distance_result_field=distance_result_field,
        )
        return self._derive(vector_query)

    # =========================================================================
    # Aggregation Methods
    # =========================================================================

    def count(self) -> int:
        """
        Count documents matching the query.

        Executes an aggregation query to count the number of documents that
        match the current query filters without fetching the actual documents.
        This is more efficient than fetching all documents and counting them.

        Returns:
            Integer count of matching documents. Returns 0 if no documents match.

        Example:
            # Count all users
            total_users = users.count()

            # Count with filters
            active_users = users.where('active', '==', True).count()

            # Count with complex query
            count = (users
                     .where('age', '>', 25)
                     .where('country', '==', 'USA')
                     .count())

        Note:
            This uses Firestore's native aggregation API, which is more efficient
            than fetching documents. However, it still counts as one document read
            per 1000 documents in the collection.
        """
        return self._extract_scalar(self._query.count(alias='count').get(), 0)

    def sum(self, field: str) -> 'Union[int, float]':
        """
        Sum a numeric field across all matching documents.

        Executes an aggregation query to sum the values of a specific field
        without fetching the actual documents. The field must contain numeric
        values (int or float).

        Args:
            field: Name of the numeric field to sum.

        Returns:
            Sum of the field values across all matching documents.
            Returns 0 if no documents match or if all values are null.

        Raises:
            ValueError: If field is None or empty.

        Example:
            # Sum all salaries
            total_salary = employees.sum('salary')

            # Sum with filters
            engineering_salary = (employees
                                  .where('department', '==', 'Engineering')
                                  .sum('salary'))

        Note:
            - Null values are ignored in the sum
            - Non-numeric values will cause an error
            - This is more efficient than fetching all documents
        """
        if not field:
            raise ValueError("sum() requires a field name")

        # Null guard via _extract_scalar keeps the documented "returns 0"
        # contract even when Firestore reports a null sum.
        return self._extract_scalar(self._query.sum(field, alias='sum').get(), 0)

    def avg(self, field: str) -> float:
        """
        Average a numeric field across all matching documents.

        Executes an aggregation query to calculate the arithmetic mean of a
        specific field without fetching the actual documents. The field must
        contain numeric values (int or float).

        Args:
            field: Name of the numeric field to average.

        Returns:
            Average of the field values across all matching documents.
            Returns 0.0 if no documents match or if all values are null.

        Raises:
            ValueError: If field is None or empty.

        Example:
            # Average age of all users
            avg_age = users.avg('age')

            # Average with filters
            avg_salary = (employees
                         .where('department', '==', 'Engineering')
                         .avg('salary'))

        Note:
            - Null values are ignored in the average calculation
            - Non-numeric values will cause an error
            - This is more efficient than fetching all documents
        """
        if not field:
            raise ValueError("avg() requires a field name")

        return self._extract_scalar(self._query.avg(field, alias='avg').get(), 0.0)

    def aggregate(self, **aggregations) -> 'Dict[str, Any]':
        """
        Perform multiple aggregations in a single query.

        Executes an aggregation query with multiple aggregation operations
        (count, sum, average) without fetching the actual documents. This is
        more efficient than running multiple separate aggregation queries.

        Args:
            **aggregations: Named aggregations using Count(), Sum(field), or
                          Avg(field) from fire_prox.aggregation module.

        Returns:
            Dictionary mapping aggregation names to their results.

        Raises:
            ValueError: If no aggregations are provided or if invalid
                       aggregation types are used.

        Example:
            from fire_prox.aggregation import Count, Sum, Avg

            # Multiple aggregations in one query
            stats = employees.aggregate(
                total_count=Count(),
                total_salary=Sum('salary'),
                avg_salary=Avg('salary'),
                avg_age=Avg('age')
            )
            # Returns: {
            #     'total_count': 150,
            #     'total_salary': 15000000,
            #     'avg_salary': 100000.0,
            #     'avg_age': 35.2
            # }

            # With filters
            eng_stats = (employees
                        .where('department', '==', 'Engineering')
                        .aggregate(
                            count=Count(),
                            total_salary=Sum('salary')
                        ))

        Note:
            - Much more efficient than multiple separate aggregation queries
            - All aggregations execute in a single round-trip to Firestore
            - Null values are ignored in sum and average calculations
        """
        if not aggregations:
            raise ValueError("aggregate() requires at least one aggregation")

        # Lazy import avoids a circular dependency at module import time.
        from .aggregation import Avg, Count, Sum

        # The first call on the native Query creates the AggregationQuery;
        # subsequent calls extend it. Both expose the same builder methods,
        # so one dispatcher handles every aggregation.
        agg_query = self._query
        for alias, spec in aggregations.items():
            agg_query = self._attach_aggregation(agg_query, alias, spec, Count, Sum, Avg)

        results_dict = {}
        for agg_result in agg_query.get() or []:
            for agg in agg_result:
                value = agg.value
                # Convert None to 0 for consistency with count()/sum()/avg().
                results_dict[agg.alias] = value if value is not None else 0
        return results_dict

    # =========================================================================
    # Helper Methods
    # =========================================================================

    def _convert_value(self, value: Any) -> Any:
        """
        Recursively convert one projection value.

        DocumentReferences become FireObjects in ATTACHED state; lists and
        dicts are walked recursively (including lists nested inside lists);
        all other values pass through unchanged.
        """
        from .state import State

        if isinstance(value, DocumentReference):
            return FireObject(
                doc_ref=value,
                initial_state=State.ATTACHED,
                parent_collection=self._parent_collection
            )
        if isinstance(value, list):
            return [self._convert_value(item) for item in value]
        if isinstance(value, dict):
            return {key: self._convert_value(item) for key, item in value.items()}
        return value

    def _convert_projection_data(self, data: 'Dict[str, Any]') -> 'Dict[str, Any]':
        """
        Convert DocumentReferences in projection data to FireObjects.

        Recursively processes a dictionary to convert any DocumentReference
        instances to FireObject instances in ATTACHED state. This allows
        users to work with references naturally using the FireProx API.

        Args:
            data: Dictionary containing projection data from Firestore.

        Returns:
            Dictionary with DocumentReferences converted to FireObjects.
        """
        return {key: self._convert_value(value) for key, value in data.items()}

    # =========================================================================
    # Query Execution Methods
    # =========================================================================

    def get(self) -> 'Union[List[FireObject], List[Dict[str, Any]]]':
        """
        Execute the query and return results as a list.

        Fetches all matching documents and hydrates them into FireObject
        instances in LOADED state. If a projection is active (via .select()),
        returns vanilla dictionaries instead of FireObject instances.

        Returns:
            - If no projection: List of FireObject instances for all documents
              matching the query.
            - If projection active: List of dictionaries containing only the
              selected fields. DocumentReferences are converted to FireObjects.
            - Empty list if no documents match.

        Example:
            # Get all results as FireObjects
            users = query.get()
            for user in users:
                print(f"{user.name}: {user.birth_year}")

            # Get projected results as dictionaries
            users = query.select('name', 'email').get()
            for user_dict in users:
                print(f"{user_dict['name']}: {user_dict['email']}")

            # Check if results exist
            results = query.get()
            if results:
                print(f"Found {len(results)} users")
        """
        # stream() already implements projection handling and hydration;
        # get() simply materializes it.
        return list(self.stream())

    def stream(self) -> 'Union[Iterator[FireObject], Iterator[Dict[str, Any]]]':
        """
        Execute the query and stream results as an iterator.

        Returns a generator that yields FireObject instances one at a time.
        This is more memory-efficient than .get() for large result sets
        as it doesn't load all results into memory at once. If a projection
        is active (via .select()), yields vanilla dictionaries instead.

        Yields:
            - If no projection: FireObject instances in LOADED state for each
              matching document.
            - If projection active: Dictionaries containing only the selected
              fields. DocumentReferences are converted to FireObjects.

        Example:
            # Stream results one at a time as FireObjects
            for user in query.stream():
                print(f"{user.name}: {user.birth_year}")

            # Stream projected results as dictionaries
            for user_dict in query.select('name', 'email').stream():
                print(f"{user_dict['name']}: {user_dict['email']}")

            # Works with any query
            for post in (posts
                        .where('published', '==', True)
                        .order_by('date', direction='DESCENDING')
                        .stream()):
                print(post.title)
        """
        if self._projection:
            # Projection active: yield plain dicts with references hydrated.
            for snapshot in self._query.stream():
                yield self._convert_projection_data(snapshot.to_dict())
        else:
            # No projection: hydrate each snapshot into a LOADED FireObject.
            for snapshot in self._query.stream():
                yield FireObject.from_snapshot(snapshot, self._parent_collection)

    # =========================================================================
    # Real-Time Listeners (Sync-only)
    # =========================================================================

    def on_snapshot(self, callback: Any) -> Any:
        """
        Listen for real-time updates to this query.

        This method sets up a real-time listener that fires the callback
        whenever any document matching the query changes. The listener runs
        on a separate thread managed by the Firestore SDK.

        **Important**: This is a sync-only feature. The listener uses the
        underlying synchronous query to run on a background thread. This is
        the standard Firestore pattern for real-time listeners in Python.

        Args:
            callback: Callback function invoked on query changes.
                     Signature: callback(query_snapshot, changes, read_time)
                     - query_snapshot: List of DocumentSnapshot objects matching the query
                     - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED)
                     - read_time: Timestamp of the snapshot

        Returns:
            Watch object with an `.unsubscribe()` method to stop listening.

        Example:
            import threading

            callback_done = threading.Event()

            def on_change(query_snapshot, changes, read_time):
                for change in changes:
                    if change.type.name == 'ADDED':
                        print(f"New: {change.document.id}")
                    elif change.type.name == 'MODIFIED':
                        print(f"Modified: {change.document.id}")
                    elif change.type.name == 'REMOVED':
                        print(f"Removed: {change.document.id}")
                callback_done.set()

            # Listen to active users only
            active_users = users.where('status', '==', 'active')
            watch = active_users.on_snapshot(on_change)

            # Wait for initial snapshot
            callback_done.wait()

            # Later: stop listening
            watch.unsubscribe()

        Note:
            The callback runs on a separate thread. Use threading primitives
            (Event, Lock, Queue) for synchronization with your main thread.
        """
        # Delegate directly; the native Watch manages the listener thread.
        return self._query.on_snapshot(callback)

    def __repr__(self) -> str:
        """Return string representation of the query."""
        return f"<FireQuery query={self._query}>"

    def __str__(self) -> str:
        """Return human-readable string representation."""
        return f"FireQuery({self._query})"

__init__(native_query, parent_collection=None, projection=None)

Initialize a FireQuery.

Args:
    native_query: The underlying native Query object from google-cloud-firestore.
    parent_collection: Optional reference to parent FireCollection.
    projection: Optional tuple of field paths to project (select specific fields).

Source code in src/fire_prox/fire_query.py
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
def __init__(
    self,
    native_query: Query,
    parent_collection: Optional[Any] = None,
    projection: Optional[tuple] = None,
):
    """
    Build a FireQuery wrapper around a native Firestore query.

    Args:
        native_query: Underlying google-cloud-firestore Query to delegate to.
        parent_collection: FireCollection that spawned this query, if any.
        projection: Tuple of field paths selected via .select(), if any.
    """
    # Only store references; every query operation delegates to the
    # wrapped native object.
    self._query = native_query
    self._parent_collection = parent_collection
    self._projection = projection

__repr__()

Return string representation of the query.

Source code in src/fire_prox/fire_query.py
892
893
894
def __repr__(self) -> str:
    """Developer-facing representation including the wrapped native query."""
    rendered = f"<FireQuery query={self._query}>"
    return rendered

__str__()

Return human-readable string representation.

Source code in src/fire_prox/fire_query.py
896
897
898
def __str__(self) -> str:
    """User-facing representation of this query wrapper."""
    text = f"FireQuery({self._query})"
    return text

aggregate(**aggregations)

Perform multiple aggregations in a single query.

Executes an aggregation query with multiple aggregation operations (count, sum, average) without fetching the actual documents. This is more efficient than running multiple separate aggregation queries.

Args: **aggregations: Named aggregations using Count(), Sum(field), or Avg(field) from fire_prox.aggregation module.

Returns: Dictionary mapping aggregation names to their results.

Raises: ValueError: If no aggregations are provided or if invalid aggregation types are used.

Example: from fire_prox.aggregation import Count, Sum, Avg

# Multiple aggregations in one query
stats = employees.aggregate(
    total_count=Count(),
    total_salary=Sum('salary'),
    avg_salary=Avg('salary'),
    avg_age=Avg('age')
)
# Returns: {
#     'total_count': 150,
#     'total_salary': 15000000,
#     'avg_salary': 100000.0,
#     'avg_age': 35.2
# }

# With filters
eng_stats = (employees
            .where('department', '==', 'Engineering')
            .aggregate(
                count=Count(),
                total_salary=Sum('salary')
            ))
# Returns: {'count': 50, 'total_salary': 5000000}

# Financial dashboard
financials = (transactions
             .where('date', '>=', start_date)
             .aggregate(
                 total_transactions=Count(),
                 total_revenue=Sum('amount'),
                 avg_transaction=Avg('amount')
             ))

Note: - Much more efficient than multiple separate aggregation queries - All aggregations execute in a single round-trip to Firestore - Null values are ignored in sum and average calculations

Source code in src/fire_prox/fire_query.py
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
def aggregate(self, **aggregations) -> Dict[str, Any]:
    """
    Perform multiple aggregations in a single query.

    Executes an aggregation query with multiple aggregation operations
    (count, sum, average) without fetching the actual documents. This is
    more efficient than running multiple separate aggregation queries.

    Args:
        **aggregations: Named aggregations using Count(), Sum(field), or
                      Avg(field) from fire_prox.aggregation module.

    Returns:
        Dictionary mapping aggregation names to their results.

    Raises:
        ValueError: If no aggregations are provided or if invalid
                   aggregation types are used.

    Example:
        from fire_prox.aggregation import Count, Sum, Avg

        stats = employees.aggregate(
            total_count=Count(),
            total_salary=Sum('salary'),
            avg_salary=Avg('salary'),
        )
        # Returns: {'total_count': 150, 'total_salary': 15000000,
        #           'avg_salary': 100000.0}

        eng_stats = (employees
                    .where('department', '==', 'Engineering')
                    .aggregate(count=Count(), total_salary=Sum('salary')))
        # Returns: {'count': 50, 'total_salary': 5000000}

    Note:
        - Much more efficient than multiple separate aggregation queries
        - All aggregations execute in a single round-trip to Firestore
        - Null values are ignored in sum and average calculations
    """
    if not aggregations:
        raise ValueError("aggregate() requires at least one aggregation")

    from .aggregation import Avg, Count, Sum

    def _apply(target, alias, agg_type):
        # Dispatch one aggregation onto `target`. `target` is the base
        # Query for the first aggregation (which creates the
        # AggregationQuery) and the AggregationQuery itself thereafter;
        # both expose the same count/sum/avg chaining methods.
        if isinstance(agg_type, Count):
            return target.count(alias=alias)
        if isinstance(agg_type, Sum):
            if not agg_type.field:
                raise ValueError(f"Sum aggregation '{alias}' is missing a field name")
            return target.sum(agg_type.field, alias=alias)
        if isinstance(agg_type, Avg):
            if not agg_type.field:
                raise ValueError(f"Avg aggregation '{alias}' is missing a field name")
            return target.avg(agg_type.field, alias=alias)
        raise ValueError(
            f"Invalid aggregation type for '{alias}': {type(agg_type).__name__}. "
            f"Use Count(), Sum(field), or Avg(field)"
        )

    # Chain every aggregation through the same dispatch helper instead of
    # duplicating the Count/Sum/Avg branch ladder for the first item.
    agg_query = self._query
    for alias, agg_type in aggregations.items():
        agg_query = _apply(agg_query, alias, agg_type)

    # Execute the single round-trip and flatten results by alias.
    result = agg_query.get()
    results_dict = {}

    if result and len(result) > 0:
        for agg_result in result:
            for agg in agg_result:
                value = agg.value
                # Convert None to 0 for consistency
                results_dict[agg.alias] = value if value is not None else 0

    return results_dict

avg(field)

Average a numeric field across all matching documents.

Executes an aggregation query to calculate the arithmetic mean of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to average.

Returns: Average of the field values across all matching documents. Returns 0.0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Average age of all users avg_age = users.avg('age') # Returns: 32.5

# Average with filters
avg_salary = (employees
             .where('department', '==', 'Engineering')
             .avg('salary'))
# Returns: 125000.0

# Average rating for active products
avg_rating = (products
             .where('active', '==', True)
             .avg('rating'))
# Returns: 4.2

Note: - Null values are ignored in the average calculation - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/fire_query.py
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
def avg(self, field: str) -> float:
    """
    Return the arithmetic mean of a numeric field over matching documents.

    Runs a server-side average aggregation; the documents themselves are
    never fetched, which is cheaper than streaming and averaging locally.

    Args:
        field: Name of the numeric field to average.

    Returns:
        The average value across all matching documents, or 0.0 when no
        documents match or every value is null.

    Raises:
        ValueError: If field is None or empty.

    Example:
        avg_age = users.avg('age')
        avg_salary = (employees
                     .where('department', '==', 'Engineering')
                     .avg('salary'))

    Note:
        - Null values are ignored in the average calculation
        - Non-numeric values will cause an error
        - This is more efficient than fetching all documents
    """
    if not field:
        raise ValueError("avg() requires a field name")

    # Delegate to the native aggregation API and unwrap the first result.
    response = self._query.avg(field, alias='avg').get()
    if response and len(response) > 0:
        for batch in response:
            mean = batch[0].value
            return 0.0 if mean is None else mean
    return 0.0

count()

Count documents matching the query.

Executes an aggregation query to count the number of documents that match the current query filters without fetching the actual documents. This is more efficient than fetching all documents and counting them.

Returns: Integer count of matching documents. Returns 0 if no documents match.

Example: # Count all users total_users = users.count() # Returns: 150

# Count with filters
active_users = users.where('active', '==', True).count()
# Returns: 42

# Count with complex query
count = (users
         .where('age', '>', 25)
         .where('country', '==', 'USA')
         .count())
# Returns: 37

Note: This uses Firestore's native aggregation API, which is more efficient than fetching documents. Billing-wise, a count aggregation still costs one document read for every batch of up to 1,000 index entries matched.

Source code in src/fire_prox/fire_query.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
def count(self) -> int:
    """
    Return the number of documents matching this query.

    Runs a server-side count aggregation instead of streaming documents,
    so only the aggregate result travels over the wire.

    Returns:
        Integer count of matching documents; 0 when nothing matches.

    Example:
        total_users = users.count()
        active_users = users.where('active', '==', True).count()

    Note:
        Uses Firestore's native aggregation API, which is more efficient
        than fetching documents, though it still incurs read charges
        proportional to the number of index entries matched.
    """
    # Delegate to the native aggregation API and unwrap the first result.
    response = self._query.count(alias='count').get()
    if response and len(response) > 0:
        for batch in response:
            return batch[0].value
    return 0

end_at(*document_fields_or_snapshot)

End query results at a cursor position (inclusive).

Creates a new FireQuery that ends at the specified cursor. The cursor document is included in the results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the end cursor applied.

Example: # Get all users up to and including age 50 query = users.order_by('age').end_at({'age': 50})

# Using a specific document as endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = target_doc_ref.get()
query = users.order_by('age').end_at(target_snapshot)
Source code in src/fire_prox/fire_query.py
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
def end_at(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    End query results at a cursor position, inclusive of the cursor doc.

    Args:
        *document_fields_or_snapshot: Either a dictionary of field values
            ({'field': value}), a DocumentSnapshot, or positional field
            values matching the order_by clause order.

    Returns:
        A new FireQuery instance with the end cursor applied.

    Example:
        # Get all users up to and including age 50
        query = users.order_by('age').end_at({'age': 50})
    """
    return FireQuery(
        self._query.end_at(*document_fields_or_snapshot),
        self._parent_collection,
        self._projection,
    )

end_before(*document_fields_or_snapshot)

End query results before a cursor position (exclusive).

Creates a new FireQuery that ends before the specified cursor. The cursor document itself is excluded from results.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the end-before cursor applied.

Example: # Get all users before age 50 (exclude 50) query = users.order_by('age').end_before({'age': 50})

# Using a specific document as exclusive endpoint
target_doc_ref = users.doc('user123')._doc_ref
target_snapshot = target_doc_ref.get()
query = users.order_by('age').end_before(target_snapshot)
Source code in src/fire_prox/fire_query.py
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
def end_before(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    End query results before a cursor position, excluding the cursor doc.

    Args:
        *document_fields_or_snapshot: Either a dictionary of field values
            ({'field': value}), a DocumentSnapshot, or positional field
            values matching the order_by clause order.

    Returns:
        A new FireQuery instance with the end-before cursor applied.

    Example:
        # Get all users before age 50 (exclude 50)
        query = users.order_by('age').end_before({'age': 50})
    """
    return FireQuery(
        self._query.end_before(*document_fields_or_snapshot),
        self._parent_collection,
        self._projection,
    )

find_nearest(vector_field, query_vector, distance_measure, limit, distance_result_field=None)

Find the nearest neighbors based on vector similarity.

Performs a vector similarity search on top of the current query filters. This allows you to combine pre-filtering with vector search (requires a composite index).

Args: vector_field: Name of the field containing vector embeddings. query_vector: Vector to compare against (google.cloud.firestore_v1.vector.Vector). distance_measure: Distance calculation method (DistanceMeasure.EUCLIDEAN, DistanceMeasure.COSINE, or DistanceMeasure.DOT_PRODUCT). limit: Maximum number of nearest neighbors to return (max 1000). distance_result_field: Optional field name to store the calculated distance in the query results.

Returns: A new FireQuery instance with the vector search applied.

Example: from google.cloud.firestore_v1.base_vector_query import DistanceMeasure from google.cloud.firestore_v1.vector import Vector

# Find nearest neighbors with pre-filtering
query = (collection
         .where('category', '==', 'tech')
         .find_nearest(
             vector_field="embedding",
             query_vector=Vector([0.1, 0.2, 0.3]),
             distance_measure=DistanceMeasure.COSINE,
             limit=5
         ))
for doc in query.get():
    print(f"{doc.title}: {doc.category}")

Note: - Requires a composite index when combining with where() clauses - Maximum limit is 1000 documents - Does not work with Firestore emulator (production only)

Source code in src/fire_prox/fire_query.py
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
def find_nearest(
    self,
    vector_field: str,
    query_vector: Any,
    distance_measure: Any,
    limit: int,
    distance_result_field: Optional[str] = None,
) -> 'FireQuery':
    """
    Find the nearest neighbors based on vector similarity.

    Runs a vector similarity search on top of the current query filters,
    so pre-filtering can be combined with vector search (this requires a
    composite index).

    Args:
        vector_field: Name of the field containing vector embeddings.
        query_vector: Vector to compare against
            (google.cloud.firestore_v1.vector.Vector).
        distance_measure: Distance calculation method
            (DistanceMeasure.EUCLIDEAN, COSINE, or DOT_PRODUCT).
        limit: Maximum number of nearest neighbors to return (max 1000).
        distance_result_field: Optional field name under which the
            computed distance is stored in the results.

    Returns:
        A new FireQuery instance with the vector search applied.

    Example:
        from google.cloud.firestore_v1.base_vector_query import DistanceMeasure
        from google.cloud.firestore_v1.vector import Vector

        query = (collection
                 .where('category', '==', 'tech')
                 .find_nearest(
                     vector_field="embedding",
                     query_vector=Vector([0.1, 0.2, 0.3]),
                     distance_measure=DistanceMeasure.COSINE,
                     limit=5
                 ))

    Note:
        - Requires a composite index when combining with where() clauses
        - Maximum limit is 1000 documents
        - Does not work with the Firestore emulator (production only)
    """
    # Collect the arguments once, then hand them to the native vector
    # search and re-wrap the resulting query.
    search_kwargs = dict(
        vector_field=vector_field,
        query_vector=query_vector,
        distance_measure=distance_measure,
        limit=limit,
        distance_result_field=distance_result_field,
    )
    return FireQuery(
        self._query.find_nearest(**search_kwargs),
        self._parent_collection,
        self._projection,
    )

get()

Execute the query and return results as a list.

Fetches all matching documents and hydrates them into FireObject instances in LOADED state. If a projection is active (via .select()), returns vanilla dictionaries instead of FireObject instances.

Returns: - If no projection: List of FireObject instances for all documents matching the query. - If projection active: List of dictionaries containing only the selected fields. DocumentReferences are converted to FireObjects. - Empty list if no documents match.

Example: # Get all results as FireObjects users = query.get() for user in users: print(f"{user.name}: {user.birth_year}")

# Get projected results as dictionaries
users = query.select('name', 'email').get()
for user_dict in users:
    print(f"{user_dict['name']}: {user_dict['email']}")

# Check if results exist
results = query.get()
if results:
    print(f"Found {len(results)} users")
else:
    print("No users found")
Source code in src/fire_prox/fire_query.py
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
def get(self) -> Union[List[FireObject], List[Dict[str, Any]]]:
    """
    Execute the query and return results as a list.

    Fetches all matching documents. Without a projection, each snapshot
    is hydrated into a FireObject in LOADED state. With a projection
    (set via .select()), plain dictionaries holding only the selected
    fields are returned instead, with any DocumentReference values
    converted to FireObjects.

    Returns:
        - Without projection: list of FireObject instances.
        - With projection: list of dictionaries of the selected fields.
        - Empty list if no documents match.

    Example:
        users = query.get()
        for user in users:
            print(f"{user.name}: {user.birth_year}")

        rows = query.select('name', 'email').get()
        for row in rows:
            print(f"{row['name']}: {row['email']}")
    """
    snapshots = self._query.stream()

    # No projection: hydrate full FireObjects.
    if not self._projection:
        return [
            FireObject.from_snapshot(snap, self._parent_collection)
            for snap in snapshots
        ]

    # Projection active: emit plain dicts, converting DocumentReferences.
    return [
        self._convert_projection_data(snap.to_dict())
        for snap in snapshots
    ]

limit(count)

Limit the number of results returned.

Creates a new FireQuery that will return at most count results.

Args: count: Maximum number of documents to return. Must be positive.

Returns: A new FireQuery instance with the limit applied.

Raises: ValueError: If count is not positive.

Example: # Get top 10 results query = users.order_by('score', direction='DESCENDING').limit(10)

# Get first 5 matching documents
query = users.where('active', '==', True).limit(5)
Source code in src/fire_prox/fire_query.py
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
def limit(self, count: int) -> 'FireQuery':
    """
    Cap the number of documents the query can return.

    Args:
        count: Maximum number of documents to return. Must be positive.

    Returns:
        A new FireQuery instance with the limit applied.

    Raises:
        ValueError: If count is not positive.

    Example:
        # Get top 10 results
        query = users.order_by('score', direction='DESCENDING').limit(10)

        # Get first 5 matching documents
        query = users.where('active', '==', True).limit(5)
    """
    # Reject zero/negative limits early with a clear message.
    if count <= 0:
        raise ValueError(f"Limit count must be positive, got {count}")
    return FireQuery(self._query.limit(count), self._parent_collection, self._projection)

on_snapshot(callback)

Listen for real-time updates to this query.

This method sets up a real-time listener that fires the callback whenever any document matching the query changes. The listener runs on a separate thread managed by the Firestore SDK.

Important: This is a sync-only feature. The listener uses the underlying synchronous query to run on a background thread. This is the standard Firestore pattern for real-time listeners in Python.

Args: callback: Callback function invoked on query changes. Signature: callback(query_snapshot, changes, read_time) - query_snapshot: List of DocumentSnapshot objects matching the query - changes: List of DocumentChange objects (ADDED, MODIFIED, REMOVED) - read_time: Timestamp of the snapshot

Returns: Watch object with an .unsubscribe() method to stop listening.

Example: import threading

callback_done = threading.Event()

def on_change(query_snapshot, changes, read_time):
    for change in changes:
        if change.type.name == 'ADDED':
            print(f"New: {change.document.id}")
        elif change.type.name == 'MODIFIED':
            print(f"Modified: {change.document.id}")
        elif change.type.name == 'REMOVED':
            print(f"Removed: {change.document.id}")
    callback_done.set()

# Listen to active users only
active_users = users.where('status', '==', 'active')
watch = active_users.on_snapshot(on_change)

# Wait for initial snapshot
callback_done.wait()

# Later: stop listening
watch.unsubscribe()

Note: The callback runs on a separate thread. Use threading primitives (Event, Lock, Queue) for synchronization with your main thread.

Source code in src/fire_prox/fire_query.py
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
def on_snapshot(self, callback: Any) -> Any:
    """
    Listen for real-time updates to this query.

    Registers a real-time listener that fires the callback whenever any
    document matching the query changes. The listener runs on a separate
    thread managed by the Firestore SDK.

    **Important**: This is a sync-only feature. The listener uses the
    underlying synchronous query on a background thread — the standard
    Firestore pattern for real-time listeners in Python.

    Args:
        callback: Function invoked on query changes with the signature
            callback(query_snapshot, changes, read_time), where
            query_snapshot is the list of matching DocumentSnapshots,
            changes is the list of DocumentChange objects (ADDED,
            MODIFIED, REMOVED), and read_time is the snapshot timestamp.

    Returns:
        Watch object with an `.unsubscribe()` method to stop listening.

    Example:
        import threading

        callback_done = threading.Event()

        def on_change(query_snapshot, changes, read_time):
            for change in changes:
                print(change.type.name, change.document.id)
            callback_done.set()

        watch = users.where('status', '==', 'active').on_snapshot(on_change)
        callback_done.wait()   # wait for initial snapshot
        watch.unsubscribe()    # later: stop listening

    Note:
        The callback runs on a separate thread. Use threading primitives
        (Event, Lock, Queue) for synchronization with your main thread.
    """
    # Registration is handled entirely by the native query.
    watch = self._query.on_snapshot(callback)
    return watch

order_by(field, direction='ASCENDING')

Add an ordering clause to the query.

Creates a new FireQuery with ordering by the specified field.

Args: field: The field path to order by. direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'. Default is 'ASCENDING'.

Returns: A new FireQuery instance with the ordering applied.

Example: # Ascending order query = users.order_by('birth_year')

# Descending order
query = users.order_by('birth_year', direction='DESCENDING')

# Multiple orderings (chained)
query = (users
         .order_by('country')
         .order_by('birth_year', direction='DESCENDING'))
Source code in src/fire_prox/fire_query.py
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
def order_by(self, field: str, direction: str = 'ASCENDING') -> 'FireQuery':
    """
    Add an ordering clause to the query.

    Creates a new FireQuery with ordering by the specified field.

    Args:
        field: The field path to order by.
        direction: Sort direction. Either 'ASCENDING' or 'DESCENDING'
                  (case-insensitive). Default is 'ASCENDING'.

    Returns:
        A new FireQuery instance with the ordering applied.

    Raises:
        ValueError: If direction is not 'ASCENDING' or 'DESCENDING'.

    Example:
        # Ascending order
        query = users.order_by('birth_year')

        # Descending order
        query = users.order_by('birth_year', direction='DESCENDING')

        # Multiple orderings (chained)
        query = (users
                 .order_by('country')
                 .order_by('birth_year', direction='DESCENDING'))
    """
    # Validate once, then map the string onto the native Query constant.
    normalized = direction.upper()
    if normalized not in ('ASCENDING', 'DESCENDING'):
        raise ValueError(f"Invalid direction: {direction}. Must be 'ASCENDING' or 'DESCENDING'")

    # Local import (as in the rest of this module) keeps the dependency lazy;
    # previously this import was duplicated in both validation branches.
    from google.cloud.firestore_v1 import Query as QueryClass
    direction_const = (QueryClass.ASCENDING if normalized == 'ASCENDING'
                       else QueryClass.DESCENDING)

    new_query = self._query.order_by(field, direction=direction_const)
    return FireQuery(new_query, self._parent_collection, self._projection)

select(*field_paths)

Select specific fields to return (projection).

Creates a new FireQuery that only returns the specified fields in the query results. When using projections, query results will be returned as vanilla dictionaries instead of FireObject instances. Any DocumentReferences in the returned dictionaries will be automatically converted to FireObject instances in ATTACHED state.

Args: *field_paths: One or more field paths to select. Field paths can include nested fields using dot notation (e.g., 'address.city').

Returns: A new FireQuery instance with the projection applied.

Raises: ValueError: If no field paths are provided.

Example: # Select a single field query = users.select('name') results = query.get() # Returns: [{'name': 'Alice'}, {'name': 'Bob'}, ...]

# Select multiple fields
query = users.select('name', 'email', 'birth_year')
results = query.get()
# Returns: [{'name': 'Alice', 'email': 'alice@example.com', 'birth_year': 1990}, ...]

# Select with filtering and ordering
query = (users
         .where('birth_year', '>', 1990)
         .select('name', 'birth_year')
         .order_by('birth_year')
         .limit(10))

# DocumentReferences are auto-converted to FireObjects
query = posts.select('title', 'author')  # author is a DocumentReference
results = query.get()
# results[0]['author'] is a FireObject, not a DocumentReference
print(results[0]['author'].name)  # Can access fields after fetch()

Note: - Projection queries return dictionaries, not FireObject instances - Only the selected fields will be present in the returned dictionaries - DocumentReferences are automatically hydrated to FireObject instances - Projected results are more bandwidth-efficient for large documents

Source code in src/fire_prox/fire_query.py
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
def select(self, *field_paths: str) -> 'FireQuery':
    """
    Restrict the fields returned by this query (projection).

    Returns a new FireQuery whose results contain only the requested
    fields. Projected results come back as plain dictionaries rather
    than FireObject instances; any DocumentReference values inside
    those dictionaries are hydrated into FireObject instances in
    ATTACHED state.

    Args:
        *field_paths: One or more field paths to include. Dot notation
            selects nested fields (e.g., 'address.city').

    Returns:
        A new FireQuery instance with the projection applied.

    Raises:
        ValueError: If called without any field paths.

    Example:
        # Single field
        results = users.select('name').get()
        # [{'name': 'Alice'}, {'name': 'Bob'}, ...]

        # Several fields
        results = users.select('name', 'email', 'birth_year').get()

        # Combined with filtering and ordering
        query = (users
                 .where('birth_year', '>', 1990)
                 .select('name', 'birth_year')
                 .order_by('birth_year')
                 .limit(10))

        # DocumentReferences come back as FireObjects
        rows = posts.select('title', 'author').get()
        print(rows[0]['author'].name)  # accessible after fetch()

    Note:
        - Projection queries yield dictionaries, not FireObject instances.
        - Only the selected fields appear in each result dictionary.
        - DocumentReferences are automatically hydrated to FireObjects.
        - Projections are more bandwidth-efficient for large documents.
    """
    if not field_paths:
        raise ValueError("select() requires at least one field path")

    # Delegate to the native query and remember the projection so result
    # handling knows to emit dictionaries.
    projected = self._query.select(list(field_paths))
    return FireQuery(projected, self._parent_collection, projection=field_paths)

start_after(*document_fields_or_snapshot)

Start query results after a cursor position (exclusive).

Creates a new FireQuery that starts after the specified cursor. The cursor document itself is excluded from results. This is typically used for pagination to avoid duplicating the last document from the previous page.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the start-after cursor applied.

Example: # Pagination: exclude the last document from previous page page1 = users.order_by('age').limit(10).get() last_age = page1[-1].age page2 = users.order_by('age').start_after({'age': last_age}).limit(10).get()

# Using a document snapshot (common pattern)
last_doc_ref = page1[-1]._doc_ref
last_snapshot = last_doc_ref.get()
page2 = users.order_by('age').start_after(last_snapshot).limit(10).get()
Source code in src/fire_prox/fire_query.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
def start_after(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    Begin results just past the given cursor (exclusive).

    The cursor document itself is not included in the results, which
    makes this the natural choice for pagination: pass the last item
    of the previous page to fetch the next page without duplicating it.

    Args:
        *document_fields_or_snapshot: One of:
            - a dictionary of field values: {'field': value}
            - a DocumentSnapshot from a previous query
            - positional field values matching the order_by clause order

    Returns:
        A new FireQuery instance with the start-after cursor applied.

    Example:
        # Pagination without repeating the previous page's last row
        page1 = users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = users.order_by('age').start_after({'age': last_age}).limit(10).get()

        # Using a document snapshot (common pattern)
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = last_doc_ref.get()
        page2 = users.order_by('age').start_after(last_snapshot).limit(10).get()
    """
    cursored = self._query.start_after(*document_fields_or_snapshot)
    return FireQuery(cursored, self._parent_collection, self._projection)

start_at(*document_fields_or_snapshot)

Start query results at a cursor position (inclusive).

Creates a new FireQuery that starts at the specified cursor. The cursor can be a document snapshot or a dictionary of field values matching the order_by fields.

Args: *document_fields_or_snapshot: Either: - A dictionary of field values: {'field': value} - A DocumentSnapshot from a previous query - Direct field values matching order_by clause order

Returns: A new FireQuery instance with the start cursor applied.

Example: # Using field values (requires matching order_by) query = users.order_by('age').start_at({'age': 25})

# Pagination: get first page, then start at last document
page1 = users.order_by('age').limit(10).get()
last_age = page1[-1].age
page2 = users.order_by('age').start_at({'age': last_age}).limit(10).get()

# Using a document snapshot
last_doc_ref = page1[-1]._doc_ref
last_snapshot = last_doc_ref.get()
page2 = users.order_by('age').start_at(last_snapshot).limit(10).get()
Source code in src/fire_prox/fire_query.py
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
def start_at(self, *document_fields_or_snapshot) -> 'FireQuery':
    """
    Begin results at the given cursor position (inclusive).

    The cursor may be a document snapshot or a dictionary of field
    values corresponding to the query's order_by fields; the matching
    document is included in the results.

    Args:
        *document_fields_or_snapshot: One of:
            - a dictionary of field values: {'field': value}
            - a DocumentSnapshot from a previous query
            - positional field values matching the order_by clause order

    Returns:
        A new FireQuery instance with the start cursor applied.

    Example:
        # Field values (requires a matching order_by)
        query = users.order_by('age').start_at({'age': 25})

        # Pagination: continue from the last document of page one
        page1 = users.order_by('age').limit(10).get()
        last_age = page1[-1].age
        page2 = users.order_by('age').start_at({'age': last_age}).limit(10).get()

        # Using a document snapshot
        last_doc_ref = page1[-1]._doc_ref
        last_snapshot = last_doc_ref.get()
        page2 = users.order_by('age').start_at(last_snapshot).limit(10).get()
    """
    cursored = self._query.start_at(*document_fields_or_snapshot)
    return FireQuery(cursored, self._parent_collection, self._projection)

stream()

Execute the query and stream results as an iterator.

Returns a generator that yields FireObject instances one at a time. This is more memory-efficient than .get() for large result sets as it doesn't load all results into memory at once. If a projection is active (via .select()), yields vanilla dictionaries instead.

Yields: - If no projection: FireObject instances in LOADED state for each matching document. - If projection active: Dictionaries containing only the selected fields. DocumentReferences are converted to FireObjects.

Example: # Stream results one at a time as FireObjects for user in query.stream(): print(f"{user.name}: {user.birth_year}") # Process each user without loading all users into memory

# Stream projected results as dictionaries
for user_dict in query.select('name', 'email').stream():
    print(f"{user_dict['name']}: {user_dict['email']}")

# Works with any query
for post in (posts
            .where('published', '==', True)
            .order_by('date', direction='DESCENDING')
            .stream()):
    print(post.title)
Source code in src/fire_prox/fire_query.py
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
def stream(self) -> 'Union[Iterator[FireObject], Iterator[Dict[str, Any]]]':
    """
    Lazily iterate over the query results.

    Yields results one document at a time instead of materializing the
    whole result set, which makes it more memory-efficient than .get()
    for large queries. When a projection is active (set via .select()),
    plain dictionaries are yielded instead of FireObject instances.

    Yields:
        - Without a projection: FireObject instances in LOADED state,
          one per matching document.
        - With a projection: dictionaries holding only the selected
          fields, with DocumentReferences converted to FireObjects.

    Example:
        # Stream FireObjects one at a time
        for user in query.stream():
            print(f"{user.name}: {user.birth_year}")

        # Stream projected results as dictionaries
        for user_dict in query.select('name', 'email').stream():
            print(f"{user_dict['name']}: {user_dict['email']}")

        # Works with any query
        for post in (posts
                    .where('published', '==', True)
                    .order_by('date', direction='DESCENDING')
                    .stream()):
            print(post.title)
    """
    snapshots = self._query.stream()
    if self._projection:
        # Projection active: emit plain dicts with references hydrated.
        for snap in snapshots:
            yield self._convert_projection_data(snap.to_dict())
    else:
        # No projection: wrap each snapshot in a LOADED FireObject.
        for snap in snapshots:
            yield FireObject.from_snapshot(snap, self._parent_collection)

sum(field)

Sum a numeric field across all matching documents.

Executes an aggregation query to sum the values of a specific field without fetching the actual documents. The field must contain numeric values (int or float).

Args: field: Name of the numeric field to sum.

Returns: Sum of the field values across all matching documents. Returns 0 if no documents match or if all values are null.

Raises: ValueError: If field is None or empty.

Example: # Sum all salaries total_salary = employees.sum('salary') # Returns: 5000000

# Sum with filters
engineering_salary = (employees
                      .where('department', '==', 'Engineering')
                      .sum('salary'))
# Returns: 2500000

# Sum revenue from active products
total_revenue = (products
                .where('active', '==', True)
                .sum('revenue'))
# Returns: 1250000.50

Note: - Null values are ignored in the sum - Non-numeric values will cause an error - This is more efficient than fetching all documents

Source code in src/fire_prox/fire_query.py
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
def sum(self, field: str) -> Union[int, float]:
    """
    Sum a numeric field across all matching documents.

    Executes a server-side aggregation query so the matching documents
    are never downloaded. The field must contain numeric values
    (int or float).

    Args:
        field: Name of the numeric field to sum.

    Returns:
        Sum of the field values across all matching documents.
        Returns 0 if no documents match or if all values are null.

    Raises:
        ValueError: If field is None or empty.

    Example:
        # Sum all salaries
        total_salary = employees.sum('salary')

        # Sum with filters
        engineering_salary = (employees
                              .where('department', '==', 'Engineering')
                              .sum('salary'))

        # Sum revenue from active products
        total_revenue = (products
                        .where('active', '==', True)
                        .sum('revenue'))

    Note:
        - Null values are ignored in the sum
        - Non-numeric values will cause an error
        - This is more efficient than fetching all documents
    """
    if not field:
        raise ValueError("sum() requires a field name")

    # Build the server-side aggregation over the current query.
    agg_query = self._query.sum(field, alias='sum')

    # The result is a sequence of rows; the first row's first aggregation
    # holds the computed sum. (The previous emptiness pre-check was
    # redundant: the loop already falls through to 0 for empty results.)
    for agg_result in agg_query.get():
        return agg_result[0].value
    return 0

where(field, op, value)

Add a filter condition to the query.

Creates a new FireQuery with an additional filter condition. Uses the immutable pattern - returns a new instance rather than modifying the current query.

Args: field: The field path to filter on (e.g., 'name', 'address.city'). op: Comparison operator. Supported operators: '==' (equal), '!=' (not equal), '<' (less than), '<=' (less than or equal), '>' (greater than), '>=' (greater than or equal), 'in' (value in list), 'not-in' (value not in list), 'array-contains' (array contains value), 'array-contains-any' (array contains any of the values). value: The value to compare against.

Returns: A new FireQuery instance with the added filter.

Example: # Single condition query = users.where('birth_year', '>', 1800)

# Multiple conditions (chained)
query = (users
         .where('birth_year', '>', 1800)
         .where('country', '==', 'England'))
Source code in src/fire_prox/fire_query.py
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
def where(self, field: str, op: str, value: Any) -> 'FireQuery':
    """
    Add a filter condition to the query.

    Returns a new FireQuery carrying the extra condition; the current
    query object is left untouched (immutable builder pattern).

    Args:
        field: Field path to filter on (e.g., 'name', 'address.city').
        op: Comparison operator. Supported operators:
            '==' (equal), '!=' (not equal),
            '<' (less than), '<=' (less than or equal),
            '>' (greater than), '>=' (greater than or equal),
            'in' (value in list), 'not-in' (value not in list),
            'array-contains' (array contains value),
            'array-contains-any' (array contains any of the values).
        value: The value to compare against.

    Returns:
        A new FireQuery instance with the added filter.

    Example:
        # Single condition
        query = users.where('birth_year', '>', 1800)

        # Multiple conditions (chained)
        query = (users
                 .where('birth_year', '>', 1800)
                 .where('country', '==', 'England'))
    """
    # Wrap the condition in a FieldFilter and apply it to the native query.
    condition = FieldFilter(field, op, value)
    filtered = self._query.where(filter=condition)
    return FireQuery(filtered, self._parent_collection, self._projection)

fireprox

FireProx: Main entry point for the library (synchronous).

This module provides the synchronous FireProx class, which serves as the primary interface for users to interact with Firestore through the simplified FireProx API.

FireProx

Bases: BaseFireProx

Main entry point for the FireProx library (synchronous).

FireProx wraps the native google-cloud-firestore Client and provides a simplified, Pythonic interface for working with Firestore. It delegates authentication and client configuration to the official library while providing higher-level abstractions for document and collection access.

The design philosophy is "wrap, don't replace" - FireProx leverages the reliability and security of the native client while providing a more intuitive developer experience optimized for rapid prototyping.

This is the synchronous implementation that supports lazy loading.

Usage Examples: # Initialize with a pre-configured native client from google.cloud import firestore from fire_prox import FireProx

native_client = firestore.Client(project='my-project')
db = FireProx(native_client)

# Access a document (ATTACHED state, lazy loading)
user = db.doc('users/alovelace')
print(user.name)  # Automatically fetches data

# Create a new document
users = db.collection('users')
new_user = users.new()
new_user.name = 'Charles Babbage'
new_user.year = 1791
new_user.save()

# Update a document
user = db.doc('users/alovelace')
user.year = 1816
user.save()

# Delete a document
user.delete()
Source code in src/fire_prox/fireprox.py
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
class FireProx(BaseFireProx):
    """
    Synchronous entry point for the FireProx library.

    Wraps a native google-cloud-firestore Client with a simplified,
    Pythonic facade. Authentication and client configuration stay with
    the official library; FireProx layers higher-level document and
    collection access on top — "wrap, don't replace". The result is a
    developer experience optimized for rapid prototyping without giving
    up the native client's reliability and security.

    This synchronous implementation supports lazy loading: document data
    is fetched on first attribute access.

    Usage Examples:
        from google.cloud import firestore
        from fire_prox import FireProx

        db = FireProx(firestore.Client(project='my-project'))

        # Lazy document access (ATTACHED state)
        user = db.doc('users/alovelace')
        print(user.name)  # fetch happens here

        # Create a new document
        users = db.collection('users')
        new_user = users.new()
        new_user.name = 'Charles Babbage'
        new_user.year = 1791
        new_user.save()

        # Update a document
        user = db.doc('users/alovelace')
        user.year = 1816
        user.save()

        # Delete a document
        user.delete()
    """

    def __init__(self, client: FirestoreClient):
        """
        Initialize FireProx around a native Firestore client.

        Args:
            client: A configured google.cloud.firestore.Client instance.
                Handle authentication and project selection before
                constructing this wrapper.

        Raises:
            TypeError: If client is not a google.cloud.firestore.Client.

        Example:
            from google.cloud import firestore
            from fire_prox import FireProx

            # Default credentials
            db = FireProx(firestore.Client())

            # Explicit project
            db = FireProx(firestore.Client(project='my-project-id'))

            # Service account
            db = FireProx(firestore.Client.from_service_account_json(
                'path/to/credentials.json'
            ))
        """
        # Fail fast with a clear error if an async (or arbitrary) client
        # is passed in.
        if not isinstance(client, FirestoreClient):
            raise TypeError(
                f"client must be a google.cloud.firestore.Client, got {type(client)}"
            )
        super().__init__(client)

    # =========================================================================
    # Document Access
    # =========================================================================

    def doc(self, path: str) -> FireObject:
        """
        Return a lazy proxy for the document at *path*.

        The returned FireObject starts in ATTACHED state; no network
        round-trip happens until an attribute is first read (lazy
        loading).

        Args:
            path: Full document path with an even number of segments,
                e.g. 'users/alovelace' or 'users/uid/posts/post123'.

        Returns:
            A FireObject instance in ATTACHED state.

        Raises:
            ValueError: If path has an odd number of segments (invalid
                document path) or contains invalid characters.

        Example:
            # Root-level document
            user = db.doc('users/alovelace')

            # Nested document (subcollection)
            post = db.doc('users/alovelace/posts/post123')

            # Lazy loading
            print(user.name)  # triggers fetch on first access
        """
        return self._create_document_proxy(path, FireObject)

    def document(self, path: str) -> FireObject:
        """
        Alias for doc(), matching the native library's naming.

        Functionally identical to doc(); provided for API consistency
        and user preference.

        Args:
            path: The full document path.

        Returns:
            A FireObject instance in ATTACHED state.
        """
        return self.doc(path)

    # =========================================================================
    # Collection Access
    # =========================================================================

    def collection(self, path: str) -> FireCollection:
        """
        Return a FireCollection wrapper for the collection at *path*.

        The wrapper sits over the native CollectionReference and is used
        for creating new documents and querying.

        Args:
            path: Collection path (odd number of segments), e.g. 'users'
                or 'users/uid/posts'.

        Returns:
            A FireCollection instance.

        Raises:
            ValueError: If path has an even number of segments (invalid
                collection path) or contains invalid characters.

        Example:
            # Root-level collection
            users = db.collection('users')
            ada = users.new()
            ada.name = 'Ada'
            ada.save()

            # Subcollection
            posts = db.collection('users/alovelace/posts')
            post = posts.new()
            post.title = 'Analytical Engine'
            post.save()
        """
        return self._create_collection_proxy(path, FireCollection)

    def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
        """
        List the subcollections directly beneath a document.

        Args:
            path: Document path whose subcollections should be listed.
            names_only: When True, return bare collection IDs instead of
                FireCollection wrappers.

        Returns:
            A list of subcollection names or FireCollection wrappers.
        """
        return self.doc(path).collections(names_only=names_only)

__init__(client)

Initialize FireProx with a native Firestore client.

Args: client: A configured google.cloud.firestore.Client instance. Authentication and project configuration should be handled before creating this instance.

Raises: TypeError: If client is not a google.cloud.firestore.Client instance.

Example: from google.cloud import firestore from fire_prox import FireProx

# Option 1: Default credentials
native_client = firestore.Client()

# Option 2: Explicit project
native_client = firestore.Client(project='my-project-id')

# Option 3: Service account
native_client = firestore.Client.from_service_account_json(
    'path/to/credentials.json'
)

# Initialize FireProx
db = FireProx(native_client)
Source code in src/fire_prox/fireprox.py
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
def __init__(self, client: FirestoreClient):
    """
    Initialize FireProx around a native Firestore client.

    Args:
        client: A configured google.cloud.firestore.Client instance.
            Handle authentication and project selection before
            constructing this wrapper.

    Raises:
        TypeError: If client is not a google.cloud.firestore.Client.

    Example:
        from google.cloud import firestore
        from fire_prox import FireProx

        # Default credentials
        db = FireProx(firestore.Client())

        # Explicit project
        db = FireProx(firestore.Client(project='my-project-id'))

        # Service account
        db = FireProx(firestore.Client.from_service_account_json(
            'path/to/credentials.json'
        ))
    """
    # Fail fast with a clear error if an async (or arbitrary) client
    # is passed in.
    if not isinstance(client, FirestoreClient):
        raise TypeError(
            f"client must be a google.cloud.firestore.Client, got {type(client)}"
        )
    super().__init__(client)

collection(path)

Get a reference to a collection by its path.

Creates a FireCollection wrapper around the native CollectionReference. Used for creating new documents or (in Phase 2) querying.

Args: path: The collection path, e.g., 'users' or 'users/uid/posts'. Can be a root-level collection (odd number of segments) or a subcollection path.

Returns: A FireCollection instance.

Raises: ValueError: If path has an even number of segments (invalid collection path) or contains invalid characters.

Example: # Root-level collection users = db.collection('users') new_user = users.new() new_user.name = 'Ada' new_user.save()

# Subcollection
posts = db.collection('users/alovelace/posts')
new_post = posts.new()
new_post.title = 'Analytical Engine'
new_post.save()
Source code in src/fire_prox/fireprox.py
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
def collection(self, path: str) -> FireCollection:
    """
    Return a FireCollection wrapper for the collection at *path*.

    The wrapper sits over the native CollectionReference and is used
    for creating new documents and querying.

    Args:
        path: Collection path (odd number of segments), e.g. 'users'
            or 'users/uid/posts'.

    Returns:
        A FireCollection instance.

    Raises:
        ValueError: If path has an even number of segments (invalid
            collection path) or contains invalid characters.

    Example:
        # Root-level collection
        users = db.collection('users')
        ada = users.new()
        ada.name = 'Ada'
        ada.save()

        # Subcollection
        posts = db.collection('users/alovelace/posts')
        post = posts.new()
        post.title = 'Analytical Engine'
        post.save()
    """
    return self._create_collection_proxy(path, FireCollection)

collections(path, *, names_only=False)

List subcollections beneath the specified document path.

Args: path: Document path whose subcollections should be listed. names_only: Return collection IDs instead of FireCollection wrappers.

Returns: List of subcollection names or FireCollection wrappers.

Source code in src/fire_prox/fireprox.py
187
188
189
190
191
192
193
194
195
196
197
198
199
def collections(self, path: str, *, names_only: bool = False) -> list[Any]:
    """
    Enumerate the subcollections under the document at *path*.

    Args:
        path: Path of the document whose subcollections are wanted.
        names_only: When True, return bare collection IDs instead of
            FireCollection wrappers.

    Returns:
        list: Subcollection names (names_only=True) or FireCollection
        wrappers (default).
    """
    # Resolve the document proxy first, then ask it for its children.
    return self.doc(path).collections(names_only=names_only)

doc(path)

Get a reference to a document by its full path.

Creates a FireObject in ATTACHED state. No data is fetched from Firestore until an attribute is accessed (lazy loading).

Args: path: The full document path, e.g., 'users/alovelace' or 'users/uid/posts/post123'. Must be a valid Firestore document path with an even number of segments.

Returns: A FireObject instance in ATTACHED state.

Raises: ValueError: If path has an odd number of segments (invalid document path) or contains invalid characters.

Example:

    # Root-level document
    user = db.doc('users/alovelace')

# Nested document (subcollection)
post = db.doc('users/alovelace/posts/post123')

# Lazy loading
print(user.name)  # Triggers fetch on first access
Source code in src/fire_prox/fireprox.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
def doc(self, path: str) -> FireObject:
    """
    Return a FireObject proxy for the document at *path*.

    The returned object starts in the ATTACHED state: it holds a valid
    reference, but nothing is read from Firestore until an attribute is
    first accessed (lazy loading).

    Args:
        path: Full document path, e.g. 'users/alovelace' or
            'users/uid/posts/post123'. Must contain an even number
            of segments.

    Returns:
        FireObject: Proxy in the ATTACHED state.

    Raises:
        ValueError: If the path has an odd number of segments (which
            would denote a collection) or contains invalid characters.

    Example:
        # Root-level document
        user = db.doc('users/alovelace')

        # Nested document (subcollection)
        post = db.doc('users/alovelace/posts/post123')

        # Lazy loading
        print(user.name)  # first access triggers the fetch
    """
    # All validation and proxy construction lives in the shared helper.
    return self._create_document_proxy(path, FireObject)

document(path)

Alias for doc(). Get a reference to a document by its full path.

Provided for API consistency with the native library and user preference. Functionally identical to doc().

Args: path: The full document path.

Returns: A FireObject instance in ATTACHED state.

Source code in src/fire_prox/fireprox.py
134
135
136
137
138
139
140
141
142
143
144
145
146
147
def document(self, path: str) -> FireObject:
    """
    Alias for doc(); returns a document proxy for *path*.

    Exists for naming parity with the native client library. Behaviour
    is identical to doc().

    Args:
        path: Full document path.

    Returns:
        FireObject: Proxy in the ATTACHED state.
    """
    return self.doc(path)

state

State management for FireObject instances.

This module defines the state machine that governs the lifecycle of FireObject instances as they transition between different states of synchronization with Firestore.

State

Bases: Enum

Represents the synchronization state of a FireObject with Firestore.

The state machine ensures that FireObject instances correctly manage their lifecycle from creation through deletion, tracking whether data has been loaded from Firestore and whether local modifications need to be saved.

States: DETACHED: Object exists only in Python memory with no Firestore reference. This is the initial state for newly created documents that haven't been saved yet. All data is considered "dirty" as it's new.

ATTACHED: Object is linked to a Firestore document path and has a valid
         DocumentReference, but the document's data has not yet been fetched.
         This enables lazy loading - the reference exists but no network
         request has been made yet.

LOADED:  Object is fully synchronized with Firestore. It has a reference
         and its data has been fetched from the server into the local cache.
         This is the primary operational state for reading and modifying data.

DELETED: Object represents a document that has been deleted from Firestore.
         It retains its ID and path for reference but is marked as defunct
         to prevent further modifications or save operations.

State Transitions:

    DETACHED -> LOADED: Via save() with optional doc_id
    ATTACHED -> LOADED: Via fetch() or implicit fetch on attribute access
    LOADED -> LOADED: Via save() (if dirty) or fetch() (refresh)
    LOADED -> DELETED: Via delete()

Source code in src/fire_prox/state.py
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
class State(Enum):
    """
    Lifecycle/synchronization state of a FireObject relative to Firestore.

    This enum backs a small state machine: each FireObject tracks whether
    it holds a Firestore reference, whether its data has been fetched, and
    whether the backing document still exists. That in turn determines
    which operations (save, fetch, delete, attribute access) are legal.

    Members:
        DETACHED: In-memory only; no Firestore reference yet. Newly
            created objects start here, and all of their data counts
            as "dirty" because none of it has been persisted.
        ATTACHED: Bound to a document path with a valid DocumentReference,
            but no data fetched yet — this is what enables lazy loading.
        LOADED: Reference plus locally cached data; the normal operational
            state for reading and modifying a document.
        DELETED: The backing document was removed from Firestore. The id
            and path are retained for reference, but the object is defunct
            and refuses further modification or saves.

    Transitions:
        DETACHED -> LOADED   via save() (optionally with a doc_id)
        ATTACHED -> LOADED   via fetch(), or an implicit fetch on attribute access
        LOADED   -> LOADED   via save() when dirty, or fetch() to refresh
        LOADED   -> DELETED  via delete()
    """

    DETACHED = auto()  # in memory only, never saved
    ATTACHED = auto()  # reference known, data not yet fetched (lazy)
    LOADED = auto()    # reference plus cached data
    DELETED = auto()   # backing document removed

    def __str__(self) -> str:
        """Human-readable name of the state, e.g. 'LOADED'."""
        return self.name

    def __repr__(self) -> str:
        """Detailed representation, e.g. 'State.LOADED'."""
        return f"State.{self.name}"

__repr__()

Return a detailed representation of the state.

Source code in src/fire_prox/state.py
54
55
56
def __repr__(self) -> str:
    """Detailed representation of the state, e.g. 'State.LOADED'."""
    return "State." + self.name

__str__()

Return a human-readable string representation of the state.

Source code in src/fire_prox/state.py
50
51
52
def __str__(self) -> str:
    """Human-readable name of the state, e.g. 'LOADED'."""
    return str(self.name)

testing

FirestoreProjectCleanupError

Bases: RuntimeError

Raised when the Firestore emulator project could not be deleted.

Source code in src/fire_prox/testing/__init__.py
67
68
class FirestoreProjectCleanupError(RuntimeError):
    """Signals that wiping the Firestore emulator project failed."""

FirestoreTestHarness

Utility that cleans up the Firestore emulator project before and after tests.

Source code in src/fire_prox/testing/__init__.py
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
class FirestoreTestHarness:
    """Wipes the Firestore emulator project before and after a test run."""

    def __init__(self, project_id: str = DEFAULT_PROJECT_ID):
        # Project whose emulator data will be deleted on setup/teardown.
        self.project_id = project_id

    def cleanup(self) -> None:
        """Delete every document in the configured emulator project."""
        cleanup_firestore(self.project_id)

    def setup(self) -> None:
        """Ensure the test starts from a clean slate."""
        self.cleanup()

    def teardown(self) -> None:
        """Leave a clean slate behind after the test."""
        self.cleanup()

    def __enter__(self) -> "FirestoreTestHarness":
        self.setup()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> Optional[bool]:
        self.teardown()
        # Returning None lets any in-flight exception propagate.
        return None

async_demo_client()

Create an async demo Firestore client.

If NOTEBOOK_CI environment variable is set, returns a standard async testing client. Otherwise, returns a client configured for the developer emulator (port 9090).

Source code in src/fire_prox/testing/__init__.py
55
56
57
58
59
60
61
62
63
64
65
def async_demo_client():
    """
    Create an async demo Firestore client.

    If NOTEBOOK_CI environment variable is set, returns a standard async testing client.
    Otherwise, returns a client configured for the developer emulator (port 9090).
    """
    # Outside CI (NOTEBOOK_CI unset), point at the long-running developer
    # emulator; in CI the pre-configured emulator host is left untouched.
    if not os.getenv("NOTEBOOK_CI"):
        os.environ["FIRESTORE_EMULATOR_HOST"] = DEMO_HOST
    return async_testing_client()

async_testing_client()

Create an asynchronous Firestore client configured to connect to the emulator.

Source code in src/fire_prox/testing/__init__.py
21
22
23
24
25
26
def async_testing_client():
    """Build an AsyncClient wired to the local Firestore emulator."""
    # Fail fast with a helpful message if the emulator is not reachable.
    check_emulator()
    return firestore.AsyncClient(project=DEFAULT_PROJECT_ID)

check_emulator()

Check if the Firestore emulator is running.

Source code in src/fire_prox/testing/__init__.py
30
31
32
33
34
35
36
37
38
39
40
41
def check_emulator():
    """
    Check whether the Firestore emulator is reachable.

    Returns:
        bool: True if the emulator responds with HTTP 200, False for any
        other status code.

    Raises:
        RuntimeError: If FIRESTORE_EMULATOR_HOST is not set, or if the
            request to the emulator fails.
    """
    # Read the host before the try block: in the original code a missing
    # env var raised KeyError and the handler then hit NameError on the
    # unbound `host`, masking the real problem.
    host = os.environ.get("FIRESTORE_EMULATOR_HOST")
    if host is None:
        raise RuntimeError(
            "Firestore emulator is not running: FIRESTORE_EMULATOR_HOST is not set"
        )
    try:
        response = requests.get(f"http://{host}", timeout=2)
        return response.status_code == 200
    except Exception as e:
        msg = f"Firestore emulator is not running at {host}"
        if host == DEMO_HOST:
            # Hint specific to the developer-emulator setup.
            msg += "\nYou can start the emulator with `pnpm developer-emulator`"
        raise RuntimeError(msg) from e

cleanup_firestore(project_id=DEFAULT_PROJECT_ID, db_or_client=None)

Delete all documents in the given project on the Firestore emulator.

Source code in src/fire_prox/testing/__init__.py
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
def cleanup_firestore(
    project_id: str = DEFAULT_PROJECT_ID,
    db_or_client: firestore.Client | firestore.AsyncClient | FireProx | AsyncFireProx | None = None
) -> None:
    """Delete all documents in the given project on the Firestore emulator."""
    # Resolve which emulator host to hit, then use its REST wipe endpoint.
    emulator_host = _get_emulator_host(db_or_client)
    url = f"http://{emulator_host}/emulator/v1/projects/{project_id}/databases/(default)/documents"
    try:
        response = requests.delete(url, timeout=10)
    except requests.RequestException as exc:
        raise FirestoreProjectCleanupError(f"Failed to connect to Firestore emulator at {url}") from exc

    # Any non-2xx status means the wipe did not happen.
    if response.status_code // 100 != 2:
        raise FirestoreProjectCleanupError(
            f"Firestore emulator returned {response.status_code} when deleting project {project_id}: {response.text}"
        )

demo_client()

Create a demo Firestore client.

If NOTEBOOK_CI environment variable is set, returns a standard testing client. Otherwise, returns a client configured for the developer emulator (port 9090).

Source code in src/fire_prox/testing/__init__.py
43
44
45
46
47
48
49
50
51
52
53
def demo_client():
    """
    Create a demo Firestore client.

    If NOTEBOOK_CI environment variable is set, returns a standard testing client.
    Otherwise, returns a client configured for the developer emulator (port 9090).
    """
    # Outside CI (NOTEBOOK_CI unset), point at the long-running developer
    # emulator; in CI the pre-configured emulator host is left untouched.
    if not os.getenv('NOTEBOOK_CI'):
        os.environ['FIRESTORE_EMULATOR_HOST'] = DEMO_HOST
    return testing_client()

firestore_harness(project_id=DEFAULT_PROJECT_ID)

Context manager that ensures Firestore cleanup in setup/teardown.

Source code in src/fire_prox/testing/__init__.py
134
135
136
137
138
139
@contextmanager
def firestore_harness(project_id: str = DEFAULT_PROJECT_ID) -> Iterator[FirestoreTestHarness]:
    """Context manager that ensures Firestore cleanup in setup/teardown."""
    # The harness's __enter__/__exit__ perform the actual cleanup calls.
    with FirestoreTestHarness(project_id=project_id) as harness:
        yield harness

firestore_test_harness()

Pytest fixture that yields a FirestoreTestHarness.

Source code in src/fire_prox/testing/__init__.py
148
149
150
151
152
@pytest.fixture(scope="function")
def firestore_test_harness() -> Iterator[FirestoreTestHarness]:
    """Function-scoped fixture yielding a clean FirestoreTestHarness."""
    with firestore_harness() as h:
        yield h

testing_client()

Create a synchronous Firestore client configured to connect to the emulator.

Source code in src/fire_prox/testing/__init__.py
13
14
15
16
17
18
def testing_client():
    """Build a synchronous Client wired to the local Firestore emulator."""
    # Fail fast with a helpful message if the emulator is not reachable.
    check_emulator()
    return firestore.Client(project=DEFAULT_PROJECT_ID)