2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
39 #include <linux/dma-mapping.h>
46 #include "agent_priv.h"
/* Protects ib_agent_port_list below; taken with spin_lock_irqsave throughout. */
50 spinlock_t ib_agent_port_list_lock;
/* Global list of per-port agent state (struct ib_agent_port_private entries). */
51 static LIST_HEAD(ib_agent_port_list);
/* Slab cache for struct ib_mad_private; defined in the core MAD layer (mad.c). */
53 extern kmem_cache_t *ib_mad_cache;
57 * Caller must hold ib_agent_port_list_lock
/*
 * Look up per-port agent state either by (device, port_num) or by a
 * registered mad_agent.  Exactly one lookup key must be supplied (see
 * BUG_ON below).  Caller must hold ib_agent_port_list_lock.
 * NOTE(review): this listing is incomplete — the function's braces,
 * the "return entry;" hits and the not-found return are not visible.
 */
59 static inline struct ib_agent_port_private *
60 __ib_get_agent_port(struct ib_device *device, int port_num,
61 struct ib_mad_agent *mad_agent)
63 struct ib_agent_port_private *entry;
65 BUG_ON(!(!!device ^ !!mad_agent)); /* Exactly one MUST be (!NULL) */
/* Device-based lookup: match on device pointer and port number. */
68 list_for_each_entry(entry, &ib_agent_port_list, port_list) {
69 if (entry->dr_smp_agent->device == device &&
70 entry->port_num == port_num)
/* Agent-based lookup: match any of the three agents registered for a port. */
74 list_for_each_entry(entry, &ib_agent_port_list, port_list) {
75 if ((entry->dr_smp_agent == mad_agent) ||
76 (entry->lr_smp_agent == mad_agent) ||
77 (entry->perf_mgmt_agent == mad_agent))
/*
 * Locked wrapper around __ib_get_agent_port(): performs the lookup under
 * ib_agent_port_list_lock with interrupts disabled.
 * NOTE(review): the "unsigned long flags;" declaration and the final
 * "return entry;" are not visible in this listing.
 */
84 static inline struct ib_agent_port_private *
85 ib_get_agent_port(struct ib_device *device, int port_num,
86 struct ib_mad_agent *mad_agent)
88 struct ib_agent_port_private *entry;
91 spin_lock_irqsave(&ib_agent_port_list_lock, flags);
92 entry = __ib_get_agent_port(device, port_num, mad_agent);
93 spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
/*
 * Decide whether a directed-route SMP should be handled by the local SM
 * agent on (device, port_num).  Delegates the actual routing decision to
 * smi_check_local_smp() using the port's directed-route SMP agent.
 * NOTE(review): the early-return for non-DR classes, the NULL check on
 * port_priv, and the function braces are not visible in this listing.
 */
98 int smi_check_local_dr_smp(struct ib_smp *smp,
99 struct ib_device *device,
102 struct ib_agent_port_private *port_priv;
/* Only directed-route subnet management packets are considered here. */
104 if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
106 port_priv = ib_get_agent_port(device, port_num, NULL);
/* Lookup failure: the port was never opened via ib_agent_port_open(). */
108 printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
110 device->name, port_num);
114 return smi_check_local_smp(port_priv->dr_smp_agent, smp);
/*
 * Build and post a UD send work request carrying the MAD in mad_priv,
 * addressed back to the sender described by the receive completion (wc)
 * and, for PerfMgmt responses, the received GRH.  On success the request
 * is queued on port_priv->send_posted_list until agent_send_handler()
 * reaps it.  On failure all resources (DMA mapping, AH, the tracking
 * struct) are unwound here.
 * NOTE(review): this listing is incomplete — the grh/wc parameters,
 * kmalloc failure path, several closing braces and the return
 * statements are not visible.
 */
117 static int agent_mad_send(struct ib_mad_agent *mad_agent,
118 struct ib_agent_port_private *port_priv,
119 struct ib_mad_private *mad_priv,
123 struct ib_agent_send_wr *agent_send_wr;
124 struct ib_sge gather_list;
125 struct ib_send_wr send_wr;
126 struct ib_send_wr *bad_send_wr;
127 struct ib_ah_attr ah_attr;
/* Per-send tracking structure; freed in the completion handler. */
131 agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
134 agent_send_wr->mad = mad_priv;
/* DMA-map the MAD payload for the HCA and describe it with one SGE. */
137 gather_list.addr = dma_map_single(mad_agent->device->dma_device,
139 sizeof(mad_priv->mad),
141 gather_list.length = sizeof(mad_priv->mad);
/* lkey comes from the port's DMA MR obtained in ib_agent_port_open(). */
142 gather_list.lkey = (*port_priv->mr).lkey;
145 send_wr.opcode = IB_WR_SEND;
146 send_wr.sg_list = &gather_list;
/* Reply goes to the QP the request arrived from. */
148 send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */
149 send_wr.wr.ud.timeout_ms = 0;
150 send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
/* Address-handle attributes mirror the incoming completion info. */
152 ah_attr.dlid = wc->slid;
153 ah_attr.port_num = mad_agent->port_num;
154 ah_attr.src_path_bits = wc->dlid_path_bits;
156 ah_attr.static_rate = 0;
157 ah_attr.ah_flags = 0; /* No GRH */
/* Only PerfMgmt (GSI) responses may carry a GRH; SMPs never do. */
158 if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
159 if (wc->wc_flags & IB_WC_GRH) {
160 ah_attr.ah_flags = IB_AH_GRH;
/* NOTE(review): sgid_index is hard-coded to 0 rather than resolved
 * from the received GRH — should sgid be looked up? (original TODO) */
162 ah_attr.grh.sgid_index = 0;
163 ah_attr.grh.hop_limit = grh->hop_limit;
/* version_tclass_flow packs version/tclass/flow-label: low 20 bits are
 * the flow label, bits 20-27 the traffic class. */
164 ah_attr.grh.flow_label = be32_to_cpup(
165 &grh->version_tclass_flow) & 0xfffff;
166 ah_attr.grh.traffic_class = (be32_to_cpup(
167 &grh->version_tclass_flow) >> 20) & 0xff;
168 memcpy(ah_attr.grh.dgid.raw,
170 sizeof(ah_attr.grh.dgid));
174 agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
175 if (IS_ERR(agent_send_wr->ah)) {
176 printk(KERN_ERR SPFX "No memory for address handle\n");
177 kfree(agent_send_wr);
181 send_wr.wr.ud.ah = agent_send_wr->ah;
/* GSI traffic uses the request's pkey and the well-known QP1 qkey;
 * SMI traffic (QP0) uses pkey index 0 and qkey 0. */
182 if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
183 send_wr.wr.ud.pkey_index = wc->pkey_index;
184 send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
185 } else { /* for SMPs */
186 send_wr.wr.ud.pkey_index = 0;
187 send_wr.wr.ud.remote_qkey = 0;
189 send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
/* wr_id round-trips the tracking struct to agent_send_handler(). */
190 send_wr.wr_id = (unsigned long)agent_send_wr;
/* Remember the DMA address so the completion handler can unmap it. */
192 pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);
/* Queue under the lock so a racing completion cannot miss the entry. */
195 spin_lock_irqsave(&port_priv->send_list_lock, flags);
196 if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
/* Post failed: unwind mapping, AH and tracking struct. */
197 spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
198 dma_unmap_single(mad_agent->device->dma_device,
199 pci_unmap_addr(agent_send_wr, mapping),
200 sizeof(mad_priv->mad),
202 ib_destroy_ah(agent_send_wr->ah);
203 kfree(agent_send_wr);
205 list_add_tail(&agent_send_wr->send_list,
206 &port_priv->send_posted_list);
207 spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
/*
 * Entry point for sending an agent response MAD: picks the MAD agent
 * registered for the MAD's management class on (device, port_num) and
 * hands off to agent_mad_send().
 * NOTE(review): the grh/wc/port_num parameters, the switch's break
 * statements, its default (unsupported class) case and the error
 * returns are not visible in this listing.
 */
215 int agent_send(struct ib_mad_private *mad,
218 struct ib_device *device,
221 struct ib_agent_port_private *port_priv;
222 struct ib_mad_agent *mad_agent;
224 port_priv = ib_get_agent_port(device, port_num, NULL);
/* Lookup failure: port was never opened via ib_agent_port_open(). */
226 printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
227 device->name, port_num);
231 /* Get mad agent based on mgmt_class in MAD */
232 switch (mad->mad.mad.mad_hdr.mgmt_class) {
233 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
234 mad_agent = port_priv->dr_smp_agent;
236 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
237 mad_agent = port_priv->lr_smp_agent;
239 case IB_MGMT_CLASS_PERF_MGMT:
240 mad_agent = port_priv->perf_mgmt_agent;
246 return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
/*
 * Send-completion callback for the agent MAD agents: recovers the
 * ib_agent_send_wr stashed in wr_id by agent_mad_send(), removes it from
 * the posted-send list, and releases its DMA mapping, address handle,
 * MAD buffer (back to the ib_mad_cache slab) and the tracking struct.
 * NOTE(review): the "unsigned long flags;" declaration, the early
 * return after the lookup failure printk, and the closing brace are not
 * visible in this listing.
 */
249 static void agent_send_handler(struct ib_mad_agent *mad_agent,
250 struct ib_mad_send_wc *mad_send_wc)
252 struct ib_agent_port_private *port_priv;
253 struct ib_agent_send_wr *agent_send_wr;
256 /* Find matching MAD agent */
257 port_priv = ib_get_agent_port(NULL, 0, mad_agent);
259 printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
260 "agent %p\n", mad_agent);
/* wr_id was set to the tracking struct's address in agent_mad_send(). */
264 agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id;
265 spin_lock_irqsave(&port_priv->send_list_lock, flags);
266 /* Remove completed send from posted send MAD list */
267 list_del(&agent_send_wr->send_list);
268 spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
/* Undo the dma_map_single() done at post time. */
271 dma_unmap_single(mad_agent->device->dma_device,
272 pci_unmap_addr(agent_send_wr, mapping),
273 sizeof(agent_send_wr->mad->mad),
276 ib_destroy_ah(agent_send_wr->ah);
278 /* Release allocated memory */
279 kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
280 kfree(agent_send_wr);
/*
 * Open SMI/PMA agent support on (device, port_num): allocates the
 * per-port state, registers MAD agents for the directed-route SM,
 * LID-routed SM and PerfMgmt classes, obtains a local-write DMA MR, and
 * links the state onto ib_agent_port_list.  Error paths unregister the
 * agents in reverse order.
 * NOTE(review): this listing is incomplete — "unsigned long flags;",
 * "int ret;", the ib_register_mad_agent() argument lists (QP type,
 * reg_req, send/recv handlers), the goto error labels and the return
 * statements are not visible.
 */
283 int ib_agent_port_open(struct ib_device *device, int port_num)
286 struct ib_agent_port_private *port_priv;
287 struct ib_mad_reg_req reg_req;
290 /* First, check if port already open for SMI */
291 port_priv = ib_get_agent_port(device, port_num, NULL);
293 printk(KERN_DEBUG SPFX "%s port %d already open\n",
294 device->name, port_num);
298 /* Create new device info */
299 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
301 printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
306 memset(port_priv, 0, sizeof *port_priv);
307 port_priv->port_num = port_num;
308 spin_lock_init(&port_priv->send_list_lock);
309 INIT_LIST_HEAD(&port_priv->send_posted_list);
311 /* Obtain MAD agent for directed route SM class */
312 reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
313 reg_req.mgmt_class_version = 1;
315 port_priv->dr_smp_agent = ib_register_mad_agent(device, port_num,
321 if (IS_ERR(port_priv->dr_smp_agent)) {
322 ret = PTR_ERR(port_priv->dr_smp_agent);
326 /* Obtain MAD agent for LID routed SM class */
/* reg_req is reused; only the management class changes per agent. */
327 reg_req.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
328 port_priv->lr_smp_agent = ib_register_mad_agent(device, port_num,
333 if (IS_ERR(port_priv->lr_smp_agent)) {
334 ret = PTR_ERR(port_priv->lr_smp_agent);
338 /* Obtain MAD agent for PerfMgmt class */
339 reg_req.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
340 port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
345 if (IS_ERR(port_priv->perf_mgmt_agent)) {
346 ret = PTR_ERR(port_priv->perf_mgmt_agent);
/* One DMA MR per port, shared by all sends (lkey used in agent_mad_send). */
350 port_priv->mr = ib_get_dma_mr(port_priv->dr_smp_agent->qp->pd,
351 IB_ACCESS_LOCAL_WRITE);
352 if (IS_ERR(port_priv->mr)) {
353 printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
354 ret = PTR_ERR(port_priv->mr);
/* Publish the fully-initialized port state on the global list. */
358 spin_lock_irqsave(&ib_agent_port_list_lock, flags);
359 list_add_tail(&port_priv->port_list, &ib_agent_port_list);
360 spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
/* Error unwind: unregister agents in reverse order of registration. */
365 ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
367 ib_unregister_mad_agent(port_priv->lr_smp_agent);
369 ib_unregister_mad_agent(port_priv->dr_smp_agent);
376 int ib_agent_port_close(struct ib_device *device, int port_num)
378 struct ib_agent_port_private *port_priv;
381 spin_lock_irqsave(&ib_agent_port_list_lock, flags);
382 port_priv = __ib_get_agent_port(device, port_num, NULL);
383 if (port_priv == NULL) {
384 spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
385 printk(KERN_ERR SPFX "Port %d not found\n", port_num);
388 list_del(&port_priv->port_list);
389 spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
391 ib_dereg_mr(port_priv->mr);
393 ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
394 ib_unregister_mad_agent(port_priv->lr_smp_agent);
395 ib_unregister_mad_agent(port_priv->dr_smp_agent);