
Commit b8a9c65

store: Use AsyncPgConnection in remaining places
1 parent 54ab7b2

6 files changed: 24 additions, 24 deletions

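The change is mechanical but visible at every call site: helpers that used to take a synchronous `PgConnection` now take the crate's `AsyncPgConnection` (a diesel_async connection, possibly behind a pooling wrapper), so callers must be async and `.await` each query. A minimal sketch of the caller-side shape, under the assumption of a directly established diesel_async connection; graph-node itself hands out pooled connections, and `helper` below is a placeholder name:

```rust
// Hedged sketch, not graph-node code: establishing and using diesel_async's
// AsyncPgConnection. Every query helper touched by this commit now follows the
// same `&mut AsyncPgConnection` + `.await` shape.
use diesel::sql_query;
use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl};

async fn example(db_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut conn = AsyncPgConnection::establish(db_url).await?;
    // Before: helper(&mut pg_conn)?  (blocking diesel call)
    // After:  helper(&mut conn).await?  (async, same query logic)
    sql_query("select 1").execute(&mut conn).await?;
    Ok(())
}
```
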
store/postgres/src/advisory_lock.rs

Lines changed: 9 additions & 4 deletions
@@ -20,7 +20,6 @@ use diesel_async::RunQueryDsl;
 use graph::prelude::StoreError;

 use crate::command_support::catalog::Site;
-use crate::pool::PgConnection;
 use crate::primary::DeploymentId;
 use crate::AsyncPgConnection;

@@ -57,7 +56,7 @@ impl Scope {

     /// Lock the deployment in this scope with the given id. Blocks until we
     /// can get the lock
-    async fn lock(&self, conn: &mut PgConnection, id: DeploymentId) -> Result<(), StoreError> {
+    async fn lock(&self, conn: &mut AsyncPgConnection, id: DeploymentId) -> Result<(), StoreError> {
         sql_query(format!("select pg_advisory_lock({}, {id})", self.id))
             .execute(conn)
             .await
@@ -120,12 +119,18 @@ where

 /// Take the lock used to keep two copy operations to run simultaneously on
 /// the same deployment. Block until we can get the lock
-pub(crate) async fn lock_copying(conn: &mut PgConnection, dst: &Site) -> Result<(), StoreError> {
+pub(crate) async fn lock_copying(
+    conn: &mut AsyncPgConnection,
+    dst: &Site,
+) -> Result<(), StoreError> {
     COPY.lock(conn, dst.id).await
 }

 /// Release the lock acquired with `lock_copying`.
-pub(crate) async fn unlock_copying(conn: &mut PgConnection, dst: &Site) -> Result<(), StoreError> {
+pub(crate) async fn unlock_copying(
+    conn: &mut AsyncPgConnection,
+    dst: &Site,
+) -> Result<(), StoreError> {
     COPY.unlock(conn, dst.id).await
 }

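The helpers in this file reduce to issuing `pg_advisory_lock` / `pg_advisory_unlock` through diesel_async, as the hunks above show. A self-contained sketch of that pattern; the integer scope and deployment id are placeholders for the crate's `Scope` and `DeploymentId` types, and the error type is simplified:

```rust
// Hedged sketch of the async advisory-lock pattern; not the crate's exact code.
use diesel::sql_query;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

const COPY_SCOPE: i32 = 1; // placeholder for the crate's copy-lock scope id

/// Blocks inside Postgres until the (scope, id) advisory lock is granted.
async fn lock(conn: &mut AsyncPgConnection, id: i32) -> Result<(), diesel::result::Error> {
    sql_query(format!("select pg_advisory_lock({COPY_SCOPE}, {id})"))
        .execute(conn)
        .await?;
    Ok(())
}

/// Releases a lock previously taken with `lock`.
async fn unlock(conn: &mut AsyncPgConnection, id: i32) -> Result<(), diesel::result::Error> {
    sql_query(format!("select pg_advisory_unlock({COPY_SCOPE}, {id})"))
        .execute(conn)
        .await?;
    Ok(())
}
```
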
store/postgres/src/block_store.rs

Lines changed: 6 additions & 6 deletions
@@ -25,9 +25,9 @@ use graph::{prelude::StoreError, util::timed_cache::TimedCache};
 use crate::{
     chain_head_listener::ChainHeadUpdateSender,
     chain_store::{ChainStoreMetrics, Storage},
-    pool::{ConnectionPool, PgConnection},
+    pool::ConnectionPool,
     primary::Mirror as PrimaryMirror,
-    ChainStore, NotificationSender, Shard, PRIMARY_SHARD,
+    AsyncPgConnection, ChainStore, NotificationSender, Shard, PRIMARY_SHARD,
 };

 use self::primary::Chain;
@@ -58,7 +58,7 @@ pub mod primary {
         prelude::StoreError,
     };

-    use crate::{chain_store::Storage, pool::PgConnection, AsyncPgConnection};
+    use crate::{chain_store::Storage, AsyncPgConnection};
     use crate::{ConnectionPool, Shard};

     table! {
@@ -119,7 +119,7 @@ pub mod primary {
     }

     pub async fn add_chain(
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         name: &str,
         shard: &Shard,
         ident: ChainIdentifier,
@@ -174,7 +174,7 @@
 
     // update chain name where chain name is 'name'
     pub async fn update_chain_name(
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         name: &str,
         new_name: &str,
     ) -> Result<(), StoreError> {
@@ -344,7 +344,7 @@ impl BlockStore {
     }

     pub async fn allocate_chain(
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         name: &String,
         shard: &Shard,
         ident: &ChainIdentifier,

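`add_chain`, `update_chain_name`, and `allocate_chain` keep their query logic; only the connection type and the `.await`ed execution change. For illustration, a hedged sketch of a diesel_async insert in that style; the `chains` table definition here is a simplified stand-in, not graph-node's actual schema:

```rust
// Hedged sketch: an async diesel insert shaped like `add_chain`; table and
// columns are assumptions for the example only.
use diesel::{insert_into, ExpressionMethods, QueryResult};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
    chains (id) {
        id -> Integer,
        name -> Text,
        shard -> Text,
    }
}

async fn add_chain_sketch(
    conn: &mut AsyncPgConnection,
    name: &str,
    shard: &str,
) -> QueryResult<()> {
    insert_into(chains::table)
        .values((chains::name.eq(name), chains::shard.eq(shard)))
        .execute(conn)
        .await?;
    Ok(())
}
```
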
store/postgres/src/catalog.rs

Lines changed: 1 addition & 2 deletions
@@ -23,7 +23,6 @@ use graph::{
     prelude::{lazy_static, StoreError, BLOCK_NUMBER_MAX},
 };

-use crate::pool::PgConnection;
 use crate::AsyncPgConnection;
 use crate::{
     block_range::BLOCK_RANGE_COLUMN,
@@ -578,7 +577,7 @@ pub async fn drop_schema(conn: &mut AsyncPgConnection, nsp: &str) -> Result<(),
     Ok(conn.batch_execute(&query).await?)
 }

-pub async fn migration_count(conn: &mut PgConnection) -> Result<usize, StoreError> {
+pub async fn migration_count(conn: &mut AsyncPgConnection) -> Result<usize, StoreError> {
     use __diesel_schema_migrations as m;

     if !table_exists(conn, NAMESPACE_PUBLIC, &MIGRATIONS_TABLE).await? {

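`migration_count` reads diesel's own `__diesel_schema_migrations` table. A hedged sketch of an equivalent async count, written as raw SQL rather than through graph-node's table definitions:

```rust
// Hedged sketch: counting applied migrations with diesel_async; not the crate's
// exact implementation.
use diesel::sql_types::BigInt;
use diesel::{sql_query, QueryableByName};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

#[derive(QueryableByName)]
struct Count {
    #[diesel(sql_type = BigInt)]
    n: i64,
}

async fn migration_count_sketch(
    conn: &mut AsyncPgConnection,
) -> Result<usize, diesel::result::Error> {
    let count: Count = sql_query("select count(*) as n from __diesel_schema_migrations")
        .get_result(conn)
        .await?;
    Ok(count.n as usize)
}
```
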
store/postgres/src/deployment.rs

Lines changed: 5 additions & 8 deletions
@@ -1,10 +1,7 @@
 //! Utilities for dealing with deployment metadata. Any connection passed
 //! into these methods must be for the shard that holds the actual
 //! deployment data and metadata
-use crate::{
-    advisory_lock, detail::GraphNodeVersion, pool::PgConnection, primary::DeploymentId,
-    AsyncPgConnection,
-};
+use crate::{advisory_lock, detail::GraphNodeVersion, primary::DeploymentId, AsyncPgConnection};
 use diesel::{
     dsl::{count, delete, insert_into, now, select, sql, update},
     sql_types::{Bool, Integer},
@@ -989,7 +986,7 @@ pub(crate) async fn insert_subgraph_errors(

 #[cfg(debug_assertions)]
 pub(crate) async fn error_count(
-    conn: &mut PgConnection,
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
 ) -> Result<usize, StoreError> {
     use subgraph_error as e;
@@ -1116,7 +1113,7 @@ pub(crate) async fn revert_subgraph_errors(
 }

 pub(crate) async fn delete_error(
-    conn: &mut PgConnection,
+    conn: &mut AsyncPgConnection,
     error_id: &str,
 ) -> Result<(), StoreError> {
     use subgraph_error as e;
@@ -1130,7 +1127,7 @@ pub(crate) async fn delete_error(
 /// Copy the dynamic data sources for `src` to `dst`. All data sources that
 /// were created up to and including `target_block` will be copied.
 pub(crate) async fn copy_errors(
-    conn: &mut PgConnection,
+    conn: &mut AsyncPgConnection,
     src: &Site,
     dst: &Site,
     target_block: &BlockPtr,
@@ -1390,7 +1387,7 @@ pub async fn set_earliest_block(
 /// go across shards and use the metadata tables mapped into the shard for
 /// `conn` which must be the shard for `dst`
 pub async fn copy_earliest_block(
-    conn: &mut PgConnection,
+    conn: &mut AsyncPgConnection,
     src: &Site,
     dst: &Site,
 ) -> Result<(), StoreError> {

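`error_count`, `delete_error`, `copy_errors`, and `copy_earliest_block` keep their bodies; only the connection type changes. As an illustration, a hedged sketch of an async delete in the style of `delete_error`; the table definition is a reduced stand-in for graph-node's `subgraph_error` table:

```rust
// Hedged sketch: an async diesel delete shaped like `delete_error`.
use diesel::{delete, ExpressionMethods, QueryDsl, QueryResult};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
    subgraph_error (id) {
        id -> Text,
        message -> Text,
    }
}

async fn delete_error_sketch(conn: &mut AsyncPgConnection, error_id: &str) -> QueryResult<()> {
    // Delete the row whose primary key matches `error_id`.
    delete(subgraph_error::table.filter(subgraph_error::id.eq(error_id)))
        .execute(conn)
        .await?;
    Ok(())
}
```
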
store/postgres/src/primary.rs

Lines changed: 2 additions & 2 deletions
@@ -4,7 +4,7 @@
 use crate::{
     block_range::UNVERSIONED_RANGE,
     detail::DeploymentDetail,
-    pool::{PgConnection, PRIMARY_PUBLIC},
+    pool::PRIMARY_PUBLIC,
     subgraph_store::{unused, Shard, PRIMARY_SHARD},
     AsyncPgConnection, ConnectionPool, ForeignServer, NotificationSender,
 };
@@ -2019,7 +2019,7 @@ impl Primary {

     /// Return `true` if we deem this installation to be empty, defined as
     /// having no deployments and no subgraph names in the database
-    pub async fn is_empty(conn: &mut PgConnection) -> Result<bool, StoreError> {
+    pub async fn is_empty(conn: &mut AsyncPgConnection) -> Result<bool, StoreError> {
         use deployment_schemas as ds;
         use subgraph as s;


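`is_empty` checks that the primary shard holds no deployments and no subgraph names. A hedged sketch of that kind of emptiness check with diesel_async; the two table definitions are simplified stand-ins for the real `deployment_schemas` and `subgraph` tables:

```rust
// Hedged sketch: an async emptiness check shaped like `Primary::is_empty`.
use diesel::{QueryDsl, QueryResult};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
    deployment_schemas (id) {
        id -> Integer,
        subgraph -> Text,
    }
}

diesel::table! {
    subgraph (id) {
        id -> Text,
        name -> Text,
    }
}

async fn is_empty_sketch(conn: &mut AsyncPgConnection) -> QueryResult<bool> {
    // Count rows in both tables; the installation is empty only if both are zero.
    let deployments: i64 = deployment_schemas::table.count().get_result(conn).await?;
    let names: i64 = subgraph::table.count().get_result(conn).await?;
    Ok(deployments == 0 && names == 0)
}
```
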
store/postgres/src/relational/index.rs

Lines changed: 1 addition & 2 deletions
@@ -18,7 +18,6 @@ use graph::prelude::{
 use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN};
 use crate::command_support::catalog::Site;
 use crate::deployment_store::DeploymentStore;
-use crate::pool::PgConnection;
 use crate::primary::Namespace;
 use crate::relational::{BYTE_ARRAY_PREFIX_SIZE, STRING_PREFIX_SIZE};
 use crate::{catalog, AsyncPgConnection};
@@ -822,7 +821,7 @@ impl IndexList {

     pub async fn recreate_invalid_indexes(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         layout: &Layout,
     ) -> Result<(), StoreError> {
         #[derive(QueryableByName, Debug)]

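`recreate_invalid_indexes` presumably starts from a catalog lookup for indexes that failed to build (`pg_index.indisvalid = false`) before rebuilding them, which is why a `QueryableByName` struct appears in its body. A hedged sketch of that lookup half with diesel_async; the SQL and column names are assumptions for the example, not the crate's exact query:

```rust
// Hedged sketch: finding invalid indexes in a namespace with diesel_async.
use diesel::sql_types::Text;
use diesel::{sql_query, QueryableByName};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

#[derive(QueryableByName, Debug)]
struct InvalidIndex {
    #[diesel(sql_type = Text)]
    index_name: String,
}

async fn invalid_indexes(
    conn: &mut AsyncPgConnection,
    namespace: &str,
) -> Result<Vec<InvalidIndex>, diesel::result::Error> {
    sql_query(
        "select i.indexrelid::regclass::text as index_name \
           from pg_index i \
           join pg_class c on c.oid = i.indexrelid \
           join pg_namespace n on n.oid = c.relnamespace \
          where n.nspname = $1 and not i.indisvalid",
    )
    .bind::<Text, _>(namespace)
    .get_results(conn)
    .await
}
```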