Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 73 additions & 0 deletions .github/workflows/pr-sizer.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# Applies a size/* label to pull requests based on total changed lines,
# and fails the sizing job when a PR exceeds 1000 changed lines.
#
# NOTE: this uses `pull_request_target`, which runs in the context of the
# base repository with access to its secrets/permissions. The
# authorization-check job gates the labeling job so that PRs from authors
# without write access are routed to "manual-approval" instead of being
# auto-processed.
name: PR Size Labeler

on:
  pull_request_target:
    # `branches` takes a sequence of branch-name patterns.
    branches:
      - integ-testing

jobs:
  # Decides whether the PR author is trusted. Outputs "auto-approve" for
  # collaborators with write/admin permission, "manual-approval" otherwise
  # (including when the permission lookup fails).
  authorization-check:
    permissions: read-all
    runs-on: ubuntu-latest
    outputs:
      approval-env: ${{ steps.collab-check.outputs.result }}
    steps:
      - name: Collaborator Check
        uses: actions/github-script@v8
        id: collab-check
        with:
          result-encoding: string
          script: |
            try {
              const permissionResponse = await github.rest.repos.getCollaboratorPermissionLevel({
                owner: context.repo.owner,
                repo: context.repo.repo,
                username: context.payload.pull_request.user.login,
              });
              const permission = permissionResponse.data.permission;
              const hasWriteAccess = ['write', 'admin'].includes(permission);
              if (!hasWriteAccess) {
                console.log(`User ${context.payload.pull_request.user.login} does not have write access`);
                return "manual-approval"
              } else {
                console.log(`Verified ${context.payload.pull_request.user.login} has write access. Auto approving.`)
                return "auto-approve"
              }
            } catch (error) {
              // Treat any API error (e.g. user is not a collaborator at all)
              // as untrusted.
              console.log(`${context.payload.pull_request.user.login} does not have write access.`)
              return "manual-approval"
            }

  # Computes additions + deletions from the PR payload and applies the
  # matching size/* label. Only runs for auto-approved (trusted) authors.
  size-label:
    runs-on: ubuntu-latest
    needs: authorization-check
    if: needs.authorization-check.outputs.approval-env == 'auto-approve'
    permissions:
      contents: read
      pull-requests: write
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Calculate PR size and apply label
        uses: actions/github-script@v8
        with:
          script: |
            const pr = context.payload.pull_request;
            const totalChanges = pr.additions + pr.deletions;

            let sizeLabel;
            if (totalChanges <= 20) sizeLabel = 'size/xs';
            else if (totalChanges <= 100) sizeLabel = 'size/s';
            else if (totalChanges <= 500) sizeLabel = 'size/m';
            else if (totalChanges <= 1000) sizeLabel = 'size/l';
            else {
              sizeLabel = 'size/xl';
              // Mark the job failed but still fall through so the size/xl
              // label is applied below.
              core.setFailed(`PR is too large (${totalChanges} lines). Please split into smaller PRs.`);
            }

            await github.rest.issues.addLabels({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.pull_request.number,
              labels: [sizeLabel]
            });
77 changes: 0 additions & 77 deletions tests_integ/models/test_conformance.py
Original file line number Diff line number Diff line change
@@ -1,77 +0,0 @@
from unittest import SkipTest

import pytest
from pydantic import BaseModel

from strands import Agent
from strands.models import Model
from tests_integ.models.providers import ProviderInfo, all_providers, cohere, llama, mistral


def get_models():
    """Return pytest params covering every known model provider.

    Each param carries the provider info, uses the provider id as the test
    id (so it shows up in the test name), and attaches the provider's marks
    so tests are skipped when that provider's requirements are unavailable.
    """
    params = []
    for info in all_providers:
        params.append(pytest.param(info, id=info.id, marks=info.mark))
    return params


@pytest.fixture(params=get_models())
def provider_info(request) -> ProviderInfo:
    """Yield each provider's info in turn, one per parametrized test case."""
    return request.param


@pytest.fixture()
def skip_for(provider_info: ProviderInfo):
    """Provide a function to skip the test if the current provider is one of
    the providers specified.

    Note: the fixture receives a single ``ProviderInfo`` (the current
    parametrized provider), not a list — the previous ``list[ProviderInfo]``
    annotation was incorrect.
    """

    def skip_for_any_provider_in_list(providers: list[ProviderInfo], description: str):
        """Skip the current test if the provider is one of those provided."""
        if provider_info in providers:
            raise SkipTest(f"Skipping test for {provider_info.id}: {description}")

    return skip_for_any_provider_in_list


@pytest.fixture()
def model(provider_info):
    """Construct the model under test for the current parametrized provider."""
    return provider_info.create_model()


def test_model_can_be_constructed(model: Model, skip_for):
assert model is not None
pass


def test_structured_output_is_forced(skip_for, model):
    """Tests that structured_output is always forced to return a value even if model doesn't have any information."""
    # These providers do not force structured output, so the test is
    # meaningless for them.
    skip_for([mistral, cohere, llama], "structured_output is not forced for provider ")

    class Weather(BaseModel):
        time: str
        weather: str

    agent = Agent(model)
    output = agent.structured_output(Weather, "How are you?")
    assert isinstance(output, Weather)


def test_structured_output_is_forced_when_provided_in_agent_invocation(skip_for, model):
    """Tests that structured_output is always forced to return a value even if model doesn't have any information."""

    class UserProfile(BaseModel):
        """Basic user profile model."""

        name: str
        age: int
        occupation: str

    # Pass the parametrized provider model explicitly. Previously this used
    # a bare Agent(), so every parametrized case exercised only the default
    # model and the injected `model` fixture was silently unused —
    # inconsistent with the sibling test above.
    agent = Agent(model)
    result = agent("Create a profile for John who is a 25 year old dentist", structured_output_model=UserProfile)
    assert result.structured_output.name == "John"
    assert result.structured_output.age == 25
    assert result.structured_output.occupation == "dentist"